FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwaccel.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
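/*
 * Build a run-time VLC from a JPEG Huffman table (the 16-entry code-length
 * histogram plus value list carried in a DHT segment). For AC tables every
 * symbol is biased by +16 and the EOB symbol is mapped to 16*256: this lets
 * decode_block() advance the coefficient index by "run + 1" with a single
 * shift, and makes EOB push the index past 63 so the block loop terminates.
 */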
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
75 
76 static int init_default_huffman_tables(MJpegDecodeContext *s)
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
86  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
87  avpriv_mjpeg_val_dc, 12, 12 },
88  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
89  avpriv_mjpeg_val_dc, 12, 12 },
90  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
91  avpriv_mjpeg_val_ac_luminance, 251, 162 },
92  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
93  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
94  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
95  avpriv_mjpeg_val_ac_luminance, 251, 162 },
96  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
97  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
130 static void init_idct(AVCodecContext *avctx)
131 {
132  MJpegDecodeContext *s = avctx->priv_data;
133 
134  ff_idctdsp_init(&s->idsp, avctx);
135  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
136  ff_zigzag_direct);
137 }
138 
139 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
161  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
175  init_default_huffman_tables(s);
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
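/*
 * ff_mjpeg_decode_dqt(): parse a DQT segment. Each table is one nibble of
 * precision (8- or 16-bit entries), one nibble of table id, then 64 values
 * in the order they appear in the stream. qscale[] is a rough quality
 * estimate taken from two early AC entries of the matrix, halved; e.g.
 * entries of 11 and 12 give qscale = 12 >> 1 = 6.
 */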
200 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
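/*
 * ff_mjpeg_decode_dht(): parse DHT segments. Layout per table: one nibble
 * class (0 = DC, 1 = AC), one nibble destination id, 16 code-length counts,
 * then the symbol values. AC tables are additionally rebuilt into
 * vlcs[2][index] without the +16 symbol bias; that copy is the one used by
 * the progressive scan decoder.
 */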
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
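/*
 * ff_mjpeg_decode_sof(): parse the frame header. Reads sample precision,
 * geometry and per-component sampling factors / quant table ids, maps the
 * sampling signature (pix_fmt_id, one nibble per h/v factor) to an
 * AVPixelFormat, (re)allocates the output frame, and for progressive
 * streams allocates the per-component coefficient and last_nnz buffers.
 */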
305 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
364  avpriv_report_missing_feature(s->avctx,
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415 
416  /* if different size, realloc/alloc picture */
417  if (width != s->width || height != s->height || bits != s->bits ||
418  memcmp(s->h_count, h_count, sizeof(h_count)) ||
419  memcmp(s->v_count, v_count, sizeof(v_count))) {
420  size_change = 1;
421 
422  s->width = width;
423  s->height = height;
424  s->bits = bits;
425  memcpy(s->h_count, h_count, sizeof(h_count));
426  memcpy(s->v_count, v_count, sizeof(v_count));
427  s->interlaced = 0;
428  s->got_picture = 0;
429 
430  /* test interlaced mode */
431  if (s->first_picture &&
432  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
433  s->org_height != 0 &&
434  s->height < ((s->org_height * 3) / 4)) {
435  s->interlaced = 1;
436  s->bottom_field = s->interlace_polarity;
437  s->picture_ptr->interlaced_frame = 1;
438  s->picture_ptr->top_field_first = !s->interlace_polarity;
439  height *= 2;
440  }
441 
442  ret = ff_set_dimensions(s->avctx, width, height);
443  if (ret < 0)
444  return ret;
445 
446  s->first_picture = 0;
447  } else {
448  size_change = 0;
449  }
450 
451  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
452  if (s->progressive) {
453  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
454  return AVERROR_INVALIDDATA;
455  }
456  } else {
457  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
458  s->rgb = 1;
459  else if (!s->lossless)
460  s->rgb = 0;
461  /* XXX: not complete test ! */
462  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
463  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
464  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
465  (s->h_count[3] << 4) | s->v_count[3];
466  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
467  /* NOTE we do not allocate pictures large enough for the possible
468  * padding of h/v_count being 4 */
469  if (!(pix_fmt_id & 0xD0D0D0D0))
470  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
471  if (!(pix_fmt_id & 0x0D0D0D0D))
472  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
473 
474  for (i = 0; i < 8; i++) {
475  int j = 6 + (i&1) - (i&6);
476  int is = (pix_fmt_id >> (4*i)) & 0xF;
477  int js = (pix_fmt_id >> (4*j)) & 0xF;
478 
479  if (is == 1 && js != 2 && (i < 2 || i > 5))
480  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
481  if (is == 1 && js != 2 && (i < 2 || i > 5))
482  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
483 
484  if (is == 1 && js == 2) {
485  if (i & 1) s->upscale_h[j/2] = 1;
486  else s->upscale_v[j/2] = 1;
487  }
488  }
489 
490  switch (pix_fmt_id) {
491  case 0x11111100:
492  if (s->rgb)
494  else {
495  if ( s->adobe_transform == 0
496  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
498  } else {
502  }
503  }
504  av_assert0(s->nb_components == 3);
505  break;
506  case 0x11111111:
507  if (s->rgb)
509  else {
510  if (s->adobe_transform == 0 && s->bits <= 8) {
512  } else {
515  }
516  }
517  av_assert0(s->nb_components == 4);
518  break;
519  case 0x22111122:
520  case 0x22111111:
521  if (s->adobe_transform == 0 && s->bits <= 8) {
523  s->upscale_v[1] = s->upscale_v[2] = 1;
524  s->upscale_h[1] = s->upscale_h[2] = 1;
525  } else if (s->adobe_transform == 2 && s->bits <= 8) {
527  s->upscale_v[1] = s->upscale_v[2] = 1;
528  s->upscale_h[1] = s->upscale_h[2] = 1;
530  } else {
531  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
534  }
535  av_assert0(s->nb_components == 4);
536  break;
537  case 0x12121100:
538  case 0x22122100:
539  case 0x21211100:
540  case 0x22211200:
542  else
543  goto unk_pixfmt;
545  break;
546  case 0x22221100:
547  case 0x22112200:
548  case 0x11222200:
550  else
551  goto unk_pixfmt;
553  break;
554  case 0x11000000:
555  case 0x13000000:
556  case 0x14000000:
557  case 0x31000000:
558  case 0x33000000:
559  case 0x34000000:
560  case 0x41000000:
561  case 0x43000000:
562  case 0x44000000:
563  if(s->bits <= 8)
565  else
567  break;
568  case 0x12111100:
569  case 0x14121200:
570  case 0x14111100:
571  case 0x22211100:
572  case 0x22112100:
573  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
574  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
575  else
576  goto unk_pixfmt;
577  s->upscale_v[0] = s->upscale_v[1] = 1;
578  } else {
579  if (pix_fmt_id == 0x14111100)
580  s->upscale_v[1] = s->upscale_v[2] = 1;
582  else
583  goto unk_pixfmt;
585  }
586  break;
587  case 0x21111100:
588  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
589  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
590  else
591  goto unk_pixfmt;
592  s->upscale_h[0] = s->upscale_h[1] = 1;
593  } else {
597  }
598  break;
599  case 0x31111100:
600  if (s->bits > 8)
601  goto unk_pixfmt;
604  s->upscale_h[1] = s->upscale_h[2] = 2;
605  break;
606  case 0x22121100:
607  case 0x22111200:
609  else
610  goto unk_pixfmt;
612  break;
613  case 0x22111100:
614  case 0x23111100:
615  case 0x42111100:
616  case 0x24111100:
620  if (pix_fmt_id == 0x42111100) {
621  if (s->bits > 8)
622  goto unk_pixfmt;
623  s->upscale_h[1] = s->upscale_h[2] = 1;
624  } else if (pix_fmt_id == 0x24111100) {
625  if (s->bits > 8)
626  goto unk_pixfmt;
627  s->upscale_v[1] = s->upscale_v[2] = 1;
628  } else if (pix_fmt_id == 0x23111100) {
629  if (s->bits > 8)
630  goto unk_pixfmt;
631  s->upscale_v[1] = s->upscale_v[2] = 2;
632  }
633  break;
634  case 0x41111100:
636  else
637  goto unk_pixfmt;
639  break;
640  default:
641  unk_pixfmt:
642  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
643  memset(s->upscale_h, 0, sizeof(s->upscale_h));
644  memset(s->upscale_v, 0, sizeof(s->upscale_v));
645  return AVERROR_PATCHWELCOME;
646  }
647  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
648  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
649  return AVERROR_PATCHWELCOME;
650  }
651  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
652  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
653  return AVERROR_PATCHWELCOME;
654  }
655  if (s->ls) {
656  memset(s->upscale_h, 0, sizeof(s->upscale_h));
657  memset(s->upscale_v, 0, sizeof(s->upscale_v));
658  if (s->nb_components == 3) {
659  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
660  } else if (s->nb_components != 1) {
661  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
662  return AVERROR_PATCHWELCOME;
663  } else if (s->palette_index && s->bits <= 8)
664  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
665  else if (s->bits <= 8)
666  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
667  else
668  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
669  }
670 
671  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
672  if (!s->pix_desc) {
673  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
674  return AVERROR_BUG;
675  }
676 
677  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
678  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
679  } else {
680  enum AVPixelFormat pix_fmts[] = {
681 #if CONFIG_MJPEG_NVDEC_HWACCEL
682  AV_PIX_FMT_CUDA,
683 #endif
684 #if CONFIG_MJPEG_VAAPI_HWACCEL
685  AV_PIX_FMT_VAAPI,
686 #endif
687  s->avctx->pix_fmt,
688  AV_PIX_FMT_NONE,
689  };
690  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
691  if (s->hwaccel_pix_fmt < 0)
692  return AVERROR(EINVAL);
693 
694  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
695  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
696  }
697 
698  if (s->avctx->skip_frame == AVDISCARD_ALL) {
699  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
700  s->picture_ptr->key_frame = 1;
701  s->got_picture = 1;
702  return 0;
703  }
704 
705  av_frame_unref(s->picture_ptr);
706  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
707  return -1;
708  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
709  s->picture_ptr->key_frame = 1;
710  s->got_picture = 1;
711 
712  for (i = 0; i < 4; i++)
713  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
714 
715  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
716  s->width, s->height, s->linesize[0], s->linesize[1],
717  s->interlaced, s->avctx->height);
718 
719  }
720 
721  if ((s->rgb && !s->lossless && !s->ls) ||
722  (!s->rgb && s->ls && s->nb_components > 1) ||
723  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
724  ) {
725  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
726  return AVERROR_PATCHWELCOME;
727  }
728 
729  /* totally blank picture as progressive JPEG will only add details to it */
730  if (s->progressive) {
731  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
732  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
733  for (i = 0; i < s->nb_components; i++) {
734  int size = bw * bh * s->h_count[i] * s->v_count[i];
735  av_freep(&s->blocks[i]);
736  av_freep(&s->last_nnz[i]);
737  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
738  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
739  if (!s->blocks[i] || !s->last_nnz[i])
740  return AVERROR(ENOMEM);
741  s->block_stride[i] = bw * s->h_count[i];
742  }
743  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
744  }
745 
746  if (s->avctx->hwaccel) {
747  s->hwaccel_picture_private =
748  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
749  if (!s->hwaccel_picture_private)
750  return AVERROR(ENOMEM);
751 
752  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
753  s->raw_image_buffer_size);
754  if (ret < 0)
755  return ret;
756  }
757 
758  return 0;
759 }
760 
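/* Decode one DC coefficient: a Huffman-coded "size" followed by that many
 * magnitude bits (get_xbits handles JPEG's sign convention). Returns the
 * sentinel 0xfffff on an invalid code so callers can tell an error apart
 * from a legal zero difference. */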
761 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
762 {
763  int code;
764  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
765  if (code < 0 || code > 16) {
766  av_log(s->avctx, AV_LOG_WARNING,
767  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
768  0, dc_index, &s->vlcs[0][dc_index]);
769  return 0xfffff;
770  }
771 
772  if (code)
773  return get_xbits(&s->gb, code);
774  else
775  return 0;
776 }
777 
778 /* decode block and dequantize */
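/* Baseline AC path. Each symbol packs (run << 4) | size; because of the +16
 * bias applied in build_vlc(), "i += code >> 4" below advances past the zero
 * run and the coefficient in one step, and EOB jumps i beyond 63. Decoded
 * levels are dequantized and written at the IDCT positions given by
 * s->scantable.permutated. */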
779 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
780  int dc_index, int ac_index, uint16_t *quant_matrix)
781 {
782  int code, i, j, level, val;
783 
784  /* DC coef */
785  val = mjpeg_decode_dc(s, dc_index);
786  if (val == 0xfffff) {
787  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
788  return AVERROR_INVALIDDATA;
789  }
790  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
791  val = av_clip_int16(val);
792  s->last_dc[component] = val;
793  block[0] = val;
794  /* AC coefs */
795  i = 0;
796  {OPEN_READER(re, &s->gb);
797  do {
798  UPDATE_CACHE(re, &s->gb);
799  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
800 
801  i += ((unsigned)code) >> 4;
802  code &= 0xf;
803  if (code) {
804  if (code > MIN_CACHE_BITS - 16)
805  UPDATE_CACHE(re, &s->gb);
806 
807  {
808  int cache = GET_CACHE(re, &s->gb);
809  int sign = (~cache) >> 31;
810  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
811  }
812 
813  LAST_SKIP_BITS(re, &s->gb, code);
814 
815  if (i > 63) {
816  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
817  return AVERROR_INVALIDDATA;
818  }
819  j = s->scantable.permutated[i];
820  block[j] = level * quant_matrix[i];
821  }
822  } while (i < 63);
823  CLOSE_READER(re, &s->gb);}
824 
825  return 0;
826 }
827 
828 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
829  int component, int dc_index,
830  uint16_t *quant_matrix, int Al)
831 {
832  unsigned val;
833  s->bdsp.clear_block(block);
834  val = mjpeg_decode_dc(s, dc_index);
835  if (val == 0xfffff) {
836  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
837  return AVERROR_INVALIDDATA;
838  }
839  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
840  s->last_dc[component] = val;
841  block[0] = val;
842  return 0;
843 }
844 
845 /* decode block and dequantize - progressive JPEG version */
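/* Spectral-selection pass of a progressive scan: only coefficients ss..se
 * are coded, scaled by 2^Al (the successive-approximation shift). A symbol
 * with size 0 and run < 15 starts an end-of-band run: "run" extra bits give
 * the number of blocks (minus one) that end here, tracked via *EOBRUN. */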
846 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
847  uint8_t *last_nnz, int ac_index,
848  uint16_t *quant_matrix,
849  int ss, int se, int Al, int *EOBRUN)
850 {
851  int code, i, j, val, run;
852  unsigned level;
853 
854  if (*EOBRUN) {
855  (*EOBRUN)--;
856  return 0;
857  }
858 
859  {
860  OPEN_READER(re, &s->gb);
861  for (i = ss; ; i++) {
862  UPDATE_CACHE(re, &s->gb);
863  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
864 
865  run = ((unsigned) code) >> 4;
866  code &= 0xF;
867  if (code) {
868  i += run;
869  if (code > MIN_CACHE_BITS - 16)
870  UPDATE_CACHE(re, &s->gb);
871 
872  {
873  int cache = GET_CACHE(re, &s->gb);
874  int sign = (~cache) >> 31;
875  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
876  }
877 
878  LAST_SKIP_BITS(re, &s->gb, code);
879 
880  if (i >= se) {
881  if (i == se) {
882  j = s->scantable.permutated[se];
883  block[j] = level * (quant_matrix[se] << Al);
884  break;
885  }
886  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
887  return AVERROR_INVALIDDATA;
888  }
889  j = s->scantable.permutated[i];
890  block[j] = level * (quant_matrix[i] << Al);
891  } else {
892  if (run == 0xF) {// ZRL - skip 15 coefficients
893  i += 15;
894  if (i >= se) {
895  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
896  return AVERROR_INVALIDDATA;
897  }
898  } else {
899  val = (1 << run);
900  if (run) {
901  UPDATE_CACHE(re, &s->gb);
902  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
903  LAST_SKIP_BITS(re, &s->gb, run);
904  }
905  *EOBRUN = val - 1;
906  break;
907  }
908  }
909  }
910  CLOSE_READER(re, &s->gb);
911  }
912 
913  if (i > *last_nnz)
914  *last_nnz = i;
915 
916  return 0;
917 }
918 
919 #define REFINE_BIT(j) { \
920  UPDATE_CACHE(re, &s->gb); \
921  sign = block[j] >> 15; \
922  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
923  ((quant_matrix[i] ^ sign) - sign) << Al; \
924  LAST_SKIP_BITS(re, &s->gb, 1); \
925 }
926 
927 #define ZERO_RUN \
928 for (; ; i++) { \
929  if (i > last) { \
930  i += run; \
931  if (i > se) { \
932  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
933  return -1; \
934  } \
935  break; \
936  } \
937  j = s->scantable.permutated[i]; \
938  if (block[j]) \
939  REFINE_BIT(j) \
940  else if (run-- == 0) \
941  break; \
942 }
943 
944 /* decode block and dequantize - progressive JPEG refinement pass */
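/* Refinement (successive-approximation) pass: coefficients that are already
 * nonzero receive one correction bit each (REFINE_BIT), while new
 * coefficients of magnitude 1 are inserted at positions selected by the
 * coded run lengths; EOB runs are handled as in the pass above. */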
945 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
946  uint8_t *last_nnz,
947  int ac_index, uint16_t *quant_matrix,
948  int ss, int se, int Al, int *EOBRUN)
949 {
950  int code, i = ss, j, sign, val, run;
951  int last = FFMIN(se, *last_nnz);
952 
953  OPEN_READER(re, &s->gb);
954  if (*EOBRUN) {
955  (*EOBRUN)--;
956  } else {
957  for (; ; i++) {
958  UPDATE_CACHE(re, &s->gb);
959  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
960 
961  if (code & 0xF) {
962  run = ((unsigned) code) >> 4;
963  UPDATE_CACHE(re, &s->gb);
964  val = SHOW_UBITS(re, &s->gb, 1);
965  LAST_SKIP_BITS(re, &s->gb, 1);
966  ZERO_RUN;
967  j = s->scantable.permutated[i];
968  val--;
969  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
970  if (i == se) {
971  if (i > *last_nnz)
972  *last_nnz = i;
973  CLOSE_READER(re, &s->gb);
974  return 0;
975  }
976  } else {
977  run = ((unsigned) code) >> 4;
978  if (run == 0xF) {
979  ZERO_RUN;
980  } else {
981  val = run;
982  run = (1 << run);
983  if (val) {
984  UPDATE_CACHE(re, &s->gb);
985  run += SHOW_UBITS(re, &s->gb, val);
986  LAST_SKIP_BITS(re, &s->gb, val);
987  }
988  *EOBRUN = run - 1;
989  break;
990  }
991  }
992  }
993 
994  if (i > *last_nnz)
995  *last_nnz = i;
996  }
997 
998  for (; i <= last; i++) {
999  j = s->scantable.permutated[i];
1000  if (block[j])
1001  REFINE_BIT(j)
1002  }
1003  CLOSE_READER(re, &s->gb);
1004 
1005  return 0;
1006 }
1007 #undef REFINE_BIT
1008 #undef ZERO_RUN
1009 
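/* Restart-marker handling: when the restart interval expires, realign the
 * bitstream, skip the RSTn marker if one is found, and reset the DC
 * predictors to the mid-level value (4 << bits). Returns nonzero when a
 * marker was consumed so the caller can also reset its EOB run. */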
1010 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1011 {
1012  int i;
1013  int reset = 0;
1014 
1015  if (s->restart_interval) {
1016  s->restart_count--;
1017  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1018  align_get_bits(&s->gb);
1019  for (i = 0; i < nb_components; i++) /* reset dc */
1020  s->last_dc[i] = (4 << s->bits);
1021  }
1022 
1023  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1024  /* skip RSTn */
1025  if (s->restart_count == 0) {
1026  if( show_bits(&s->gb, i) == (1 << i) - 1
1027  || show_bits(&s->gb, i) == 0xFF) {
1028  int pos = get_bits_count(&s->gb);
1029  align_get_bits(&s->gb);
1030  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1031  skip_bits(&s->gb, 8);
1032  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1033  for (i = 0; i < nb_components; i++) /* reset dc */
1034  s->last_dc[i] = (4 << s->bits);
1035  reset = 1;
1036  } else
1037  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1038  }
1039  }
1040  }
1041  return reset;
1042 }
1043 
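/* Lossless JPEG, interleaved RGB(A) path: each sample is predicted from its
 * left/top/top-left neighbours (PREDICT with predictors 1..7), the Huffman
 * coded difference is added, and any reversible colour transform (RCT or
 * Pegasus RCT) is undone when the packed output row is written. */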
1044 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1045 {
1046  int i, mb_x, mb_y;
1047  uint16_t (*buffer)[4];
1048  int left[4], top[4], topleft[4];
1049  const int linesize = s->linesize[0];
1050  const int mask = ((1 << s->bits) - 1) << point_transform;
1051  int resync_mb_y = 0;
1052  int resync_mb_x = 0;
1053 
1054  if (s->nb_components != 3 && s->nb_components != 4)
1055  return AVERROR_INVALIDDATA;
1056  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1057  return AVERROR_INVALIDDATA;
1058 
1059 
1060  s->restart_count = s->restart_interval;
1061 
1062  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
1063  (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1064  if (!s->ljpeg_buffer)
1065  return AVERROR(ENOMEM);
1066 
1067  buffer = s->ljpeg_buffer;
1068 
1069  for (i = 0; i < 4; i++)
1070  buffer[0][i] = 1 << (s->bits - 1);
1071 
1072  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1073  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1074 
1075  if (s->interlaced && s->bottom_field)
1076  ptr += linesize >> 1;
1077 
1078  for (i = 0; i < 4; i++)
1079  top[i] = left[i] = topleft[i] = buffer[0][i];
1080 
1081  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1082  int modified_predictor = predictor;
1083 
1084  if (get_bits_left(&s->gb) < 1) {
1085  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1086  return AVERROR_INVALIDDATA;
1087  }
1088 
1089  if (s->restart_interval && !s->restart_count){
1090  s->restart_count = s->restart_interval;
1091  resync_mb_x = mb_x;
1092  resync_mb_y = mb_y;
1093  for(i=0; i<4; i++)
1094  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1095  }
1096  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1097  modified_predictor = 1;
1098 
1099  for (i=0;i<nb_components;i++) {
1100  int pred, dc;
1101 
1102  topleft[i] = top[i];
1103  top[i] = buffer[mb_x][i];
1104 
1105  PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
1106 
1107  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1108  if(dc == 0xFFFFF)
1109  return -1;
1110 
1111  left[i] = buffer[mb_x][i] =
1112  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1113  }
1114 
1115  if (s->restart_interval && !--s->restart_count) {
1116  align_get_bits(&s->gb);
1117  skip_bits(&s->gb, 16); /* skip RSTn */
1118  }
1119  }
1120  if (s->rct && s->nb_components == 4) {
1121  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1122  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1123  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1124  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1125  ptr[4*mb_x + 0] = buffer[mb_x][3];
1126  }
1127  } else if (s->nb_components == 4) {
1128  for(i=0; i<nb_components; i++) {
1129  int c= s->comp_index[i];
1130  if (s->bits <= 8) {
1131  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1132  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1133  }
1134  } else if(s->bits == 9) {
1135  return AVERROR_PATCHWELCOME;
1136  } else {
1137  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1138  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1139  }
1140  }
1141  }
1142  } else if (s->rct) {
1143  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1144  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1145  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1146  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1147  }
1148  } else if (s->pegasus_rct) {
1149  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1150  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1151  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1152  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1153  }
1154  } else {
1155  for(i=0; i<nb_components; i++) {
1156  int c= s->comp_index[i];
1157  if (s->bits <= 8) {
1158  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1159  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1160  }
1161  } else if(s->bits == 9) {
1162  return AVERROR_PATCHWELCOME;
1163  } else {
1164  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1165  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1166  }
1167  }
1168  }
1169  }
1170  }
1171  return 0;
1172 }
1173 
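/* Lossless JPEG, planar (YUV/grey) path: same DPCM scheme as above, but
 * samples are written straight into the destination planes at full bit
 * depth; on the top row, in the left column and right after a resync the
 * predictor falls back to the mid-level value, the left sample or the
 * sample above. */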
1174 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1175  int point_transform, int nb_components)
1176 {
1177  int i, mb_x, mb_y, mask;
1178  int bits= (s->bits+7)&~7;
1179  int resync_mb_y = 0;
1180  int resync_mb_x = 0;
1181 
1182  point_transform += bits - s->bits;
1183  mask = ((1 << s->bits) - 1) << point_transform;
1184 
1185  av_assert0(nb_components>=1 && nb_components<=4);
1186 
1187  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1188  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1189  if (get_bits_left(&s->gb) < 1) {
1190  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1191  return AVERROR_INVALIDDATA;
1192  }
1193  if (s->restart_interval && !s->restart_count){
1194  s->restart_count = s->restart_interval;
1195  resync_mb_x = mb_x;
1196  resync_mb_y = mb_y;
1197  }
1198 
1199  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1200  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1201  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1202  for (i = 0; i < nb_components; i++) {
1203  uint8_t *ptr;
1204  uint16_t *ptr16;
1205  int n, h, v, x, y, c, j, linesize;
1206  n = s->nb_blocks[i];
1207  c = s->comp_index[i];
1208  h = s->h_scount[i];
1209  v = s->v_scount[i];
1210  x = 0;
1211  y = 0;
1212  linesize= s->linesize[c];
1213 
1214  if(bits>8) linesize /= 2;
1215 
1216  for(j=0; j<n; j++) {
1217  int pred, dc;
1218 
1219  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1220  if(dc == 0xFFFFF)
1221  return -1;
1222  if ( h * mb_x + x >= s->width
1223  || v * mb_y + y >= s->height) {
1224  // Nothing to do
1225  } else if (bits<=8) {
1226  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1227  if(y==0 && toprow){
1228  if(x==0 && leftcol){
1229  pred= 1 << (bits - 1);
1230  }else{
1231  pred= ptr[-1];
1232  }
1233  }else{
1234  if(x==0 && leftcol){
1235  pred= ptr[-linesize];
1236  }else{
1237  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1238  }
1239  }
1240 
1241  if (s->interlaced && s->bottom_field)
1242  ptr += linesize >> 1;
1243  pred &= mask;
1244  *ptr= pred + ((unsigned)dc << point_transform);
1245  }else{
1246  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1247  if(y==0 && toprow){
1248  if(x==0 && leftcol){
1249  pred= 1 << (bits - 1);
1250  }else{
1251  pred= ptr16[-1];
1252  }
1253  }else{
1254  if(x==0 && leftcol){
1255  pred= ptr16[-linesize];
1256  }else{
1257  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1258  }
1259  }
1260 
1261  if (s->interlaced && s->bottom_field)
1262  ptr16 += linesize >> 1;
1263  pred &= mask;
1264  *ptr16= pred + ((unsigned)dc << point_transform);
1265  }
1266  if (++x == h) {
1267  x = 0;
1268  y++;
1269  }
1270  }
1271  }
1272  } else {
1273  for (i = 0; i < nb_components; i++) {
1274  uint8_t *ptr;
1275  uint16_t *ptr16;
1276  int n, h, v, x, y, c, j, linesize, dc;
1277  n = s->nb_blocks[i];
1278  c = s->comp_index[i];
1279  h = s->h_scount[i];
1280  v = s->v_scount[i];
1281  x = 0;
1282  y = 0;
1283  linesize = s->linesize[c];
1284 
1285  if(bits>8) linesize /= 2;
1286 
1287  for (j = 0; j < n; j++) {
1288  int pred;
1289 
1290  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1291  if(dc == 0xFFFFF)
1292  return -1;
1293  if ( h * mb_x + x >= s->width
1294  || v * mb_y + y >= s->height) {
1295  // Nothing to do
1296  } else if (bits<=8) {
1297  ptr = s->picture_ptr->data[c] +
1298  (linesize * (v * mb_y + y)) +
1299  (h * mb_x + x); //FIXME optimize this crap
1300  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1301 
1302  pred &= mask;
1303  *ptr = pred + ((unsigned)dc << point_transform);
1304  }else{
1305  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1306  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1307 
1308  pred &= mask;
1309  *ptr16= pred + ((unsigned)dc << point_transform);
1310  }
1311 
1312  if (++x == h) {
1313  x = 0;
1314  y++;
1315  }
1316  }
1317  }
1318  }
1319  if (s->restart_interval && !--s->restart_count) {
1320  align_get_bits(&s->gb);
1321  skip_bits(&s->gb, 16); /* skip RSTn */
1322  }
1323  }
1324  }
1325  return 0;
1326 }
1327 
1328 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1329  uint8_t *dst, const uint8_t *src,
1330  int linesize, int lowres)
1331 {
1332  switch (lowres) {
1333  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1334  break;
1335  case 1: copy_block4(dst, src, linesize, linesize, 4);
1336  break;
1337  case 2: copy_block2(dst, src, linesize, linesize, 2);
1338  break;
1339  case 3: *dst = *src;
1340  break;
1341  }
1342 }
1343 
1344 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1345 {
1346  int block_x, block_y;
1347  int size = 8 >> s->avctx->lowres;
1348  if (s->bits > 8) {
1349  for (block_y=0; block_y<size; block_y++)
1350  for (block_x=0; block_x<size; block_x++)
1351  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1352  } else {
1353  for (block_y=0; block_y<size; block_y++)
1354  for (block_x=0; block_x<size; block_x++)
1355  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1356  }
1357 }
1358 
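/* Sequential (baseline) scan: iterates over MBs, decoding every 8x8 block of
 * every component and running the IDCT into the output planes; when an
 * mb_bitmask marks an MB as unchanged, the block is copied from the
 * reference frame instead. The same loop also drives the DC passes of
 * progressive frames. */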
1359 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1360  int Al, const uint8_t *mb_bitmask,
1361  int mb_bitmask_size,
1362  const AVFrame *reference)
1363 {
1364  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1365  uint8_t *data[MAX_COMPONENTS];
1366  const uint8_t *reference_data[MAX_COMPONENTS];
1367  int linesize[MAX_COMPONENTS];
1368  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1369  int bytes_per_pixel = 1 + (s->bits > 8);
1370 
1371  if (mb_bitmask) {
1372  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1373  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1374  return AVERROR_INVALIDDATA;
1375  }
1376  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1377  }
1378 
1379  s->restart_count = 0;
1380 
1381  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1382  &chroma_v_shift);
1383  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1384  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1385 
1386  for (i = 0; i < nb_components; i++) {
1387  int c = s->comp_index[i];
1388  data[c] = s->picture_ptr->data[c];
1389  reference_data[c] = reference ? reference->data[c] : NULL;
1390  linesize[c] = s->linesize[c];
1391  s->coefs_finished[c] |= 1;
1392  }
1393 
1394  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1395  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1396  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1397 
1398  if (s->restart_interval && !s->restart_count)
1399  s->restart_count = s->restart_interval;
1400 
1401  if (get_bits_left(&s->gb) < 0) {
1402  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1403  -get_bits_left(&s->gb));
1404  return AVERROR_INVALIDDATA;
1405  }
1406  for (i = 0; i < nb_components; i++) {
1407  uint8_t *ptr;
1408  int n, h, v, x, y, c, j;
1409  int block_offset;
1410  n = s->nb_blocks[i];
1411  c = s->comp_index[i];
1412  h = s->h_scount[i];
1413  v = s->v_scount[i];
1414  x = 0;
1415  y = 0;
1416  for (j = 0; j < n; j++) {
1417  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1418  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1419 
1420  if (s->interlaced && s->bottom_field)
1421  block_offset += linesize[c] >> 1;
1422  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1423  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1424  ptr = data[c] + block_offset;
1425  } else
1426  ptr = NULL;
1427  if (!s->progressive) {
1428  if (copy_mb) {
1429  if (ptr)
1430  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1431  linesize[c], s->avctx->lowres);
1432 
1433  } else {
1434  s->bdsp.clear_block(s->block);
1435  if (decode_block(s, s->block, i,
1436  s->dc_index[i], s->ac_index[i],
1437  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1438  av_log(s->avctx, AV_LOG_ERROR,
1439  "error y=%d x=%d\n", mb_y, mb_x);
1440  return AVERROR_INVALIDDATA;
1441  }
1442  if (ptr) {
1443  s->idsp.idct_put(ptr, linesize[c], s->block);
1444  if (s->bits & 7)
1445  shift_output(s, ptr, linesize[c]);
1446  }
1447  }
1448  } else {
1449  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1450  (h * mb_x + x);
1451  int16_t *block = s->blocks[c][block_idx];
1452  if (Ah)
1453  block[0] += get_bits1(&s->gb) *
1454  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1455  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1456  s->quant_matrixes[s->quant_sindex[i]],
1457  Al) < 0) {
1458  av_log(s->avctx, AV_LOG_ERROR,
1459  "error y=%d x=%d\n", mb_y, mb_x);
1460  return AVERROR_INVALIDDATA;
1461  }
1462  }
1463  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1464  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1465  mb_x, mb_y, x, y, c, s->bottom_field,
1466  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1467  if (++x == h) {
1468  x = 0;
1469  y++;
1470  }
1471  }
1472  }
1473 
1474  handle_rstn(s, nb_components);
1475  }
1476  }
1477  return 0;
1478 }
1479 
1480 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1481  int se, int Ah, int Al)
1482 {
1483  int mb_x, mb_y;
1484  int EOBRUN = 0;
1485  int c = s->comp_index[0];
1486  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1487 
1488  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1489  if (se < ss || se > 63) {
1490  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1491  return AVERROR_INVALIDDATA;
1492  }
1493 
1494  // s->coefs_finished is a bitmask for coefficients coded
1495  // ss and se are parameters telling start and end coefficients
1496  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1497 
1498  s->restart_count = 0;
1499 
1500  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1501  int block_idx = mb_y * s->block_stride[c];
1502  int16_t (*block)[64] = &s->blocks[c][block_idx];
1503  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1504  if (get_bits_left(&s->gb) <= 0) {
1505  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1506  return AVERROR_INVALIDDATA;
1507  }
1508  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1509  int ret;
1510  if (s->restart_interval && !s->restart_count)
1512  s->restart_count = s->restart_interval;
1513  if (Ah)
1514  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1515  quant_matrix, ss, se, Al, &EOBRUN);
1516  else
1517  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1518  quant_matrix, ss, se, Al, &EOBRUN);
1519  if (ret < 0) {
1520  av_log(s->avctx, AV_LOG_ERROR,
1521  "error y=%d x=%d\n", mb_y, mb_x);
1522  return AVERROR_INVALIDDATA;
1523  }
1524 
1525  if (handle_rstn(s, 0))
1526  EOBRUN = 0;
1527  }
1528  }
1529  return 0;
1530 }
1531 
1532 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1533 {
1534  int mb_x, mb_y;
1535  int c;
1536  const int bytes_per_pixel = 1 + (s->bits > 8);
1537  const int block_size = s->lossless ? 1 : 8;
1538 
1539  for (c = 0; c < s->nb_components; c++) {
1540  uint8_t *data = s->picture_ptr->data[c];
1541  int linesize = s->linesize[c];
1542  int h = s->h_max / s->h_count[c];
1543  int v = s->v_max / s->v_count[c];
1544  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1545  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1546 
1547  if (~s->coefs_finished[c])
1548  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1549 
1550  if (s->interlaced && s->bottom_field)
1551  data += linesize >> 1;
1552 
1553  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1554  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1555  int block_idx = mb_y * s->block_stride[c];
1556  int16_t (*block)[64] = &s->blocks[c][block_idx];
1557  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1558  s->idsp.idct_put(ptr, linesize, *block);
1559  if (s->bits & 7)
1560  shift_output(s, ptr, linesize);
1561  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1562  }
1563  }
1564  }
1565 }
1566 
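/* ff_mjpeg_decode_sos(): parse a scan header (component selectors, DC/AC
 * table ids, Ss/Se/Ah/Al) and dispatch to the JPEG-LS, lossless, progressive
 * or baseline scan decoders, or hand the raw scan data to a hwaccel. For
 * AVRn-style interlaced files the second field may follow in the same
 * buffer, hence the next_field loop. */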
1567 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1568  int mb_bitmask_size, const AVFrame *reference)
1569 {
1570  int len, nb_components, i, h, v, predictor, point_transform;
1571  int index, id, ret;
1572  const int block_size = s->lossless ? 1 : 8;
1573  int ilv, prev_shift;
1574 
1575  if (!s->got_picture) {
1576  av_log(s->avctx, AV_LOG_WARNING,
1577  "Can not process SOS before SOF, skipping\n");
1578  return -1;
1579  }
1580 
1581  if (reference) {
1582  if (reference->width != s->picture_ptr->width ||
1583  reference->height != s->picture_ptr->height ||
1584  reference->format != s->picture_ptr->format) {
1585  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1586  return AVERROR_INVALIDDATA;
1587  }
1588  }
1589 
1590  /* XXX: verify len field validity */
1591  len = get_bits(&s->gb, 16);
1592  nb_components = get_bits(&s->gb, 8);
1593  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1594  avpriv_report_missing_feature(s->avctx,
1595  "decode_sos: nb_components (%d)",
1596  nb_components);
1597  return AVERROR_PATCHWELCOME;
1598  }
1599  if (len != 6 + 2 * nb_components) {
1600  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1601  return AVERROR_INVALIDDATA;
1602  }
1603  for (i = 0; i < nb_components; i++) {
1604  id = get_bits(&s->gb, 8) - 1;
1605  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1606  /* find component index */
1607  for (index = 0; index < s->nb_components; index++)
1608  if (id == s->component_id[index])
1609  break;
1610  if (index == s->nb_components) {
1611  av_log(s->avctx, AV_LOG_ERROR,
1612  "decode_sos: index(%d) out of components\n", index);
1613  return AVERROR_INVALIDDATA;
1614  }
1615  /* Metasoft MJPEG codec has Cb and Cr swapped */
1616  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1617  && nb_components == 3 && s->nb_components == 3 && i)
1618  index = 3 - i;
1619 
1620  s->quant_sindex[i] = s->quant_index[index];
1621  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1622  s->h_scount[i] = s->h_count[index];
1623  s->v_scount[i] = s->v_count[index];
1624 
1625  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1626  index = (index+2)%3;
1627 
1628  s->comp_index[i] = index;
1629 
1630  s->dc_index[i] = get_bits(&s->gb, 4);
1631  s->ac_index[i] = get_bits(&s->gb, 4);
1632 
1633  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1634  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1635  goto out_of_range;
1636  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1637  goto out_of_range;
1638  }
1639 
1640  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1641  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1642  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1643  prev_shift = get_bits(&s->gb, 4); /* Ah */
1644  point_transform = get_bits(&s->gb, 4); /* Al */
1645  }else
1646  prev_shift = point_transform = 0;
1647 
1648  if (nb_components > 1) {
1649  /* interleaved stream */
1650  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1651  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1652  } else if (!s->ls) { /* skip this for JPEG-LS */
1653  h = s->h_max / s->h_scount[0];
1654  v = s->v_max / s->v_scount[0];
1655  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1656  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1657  s->nb_blocks[0] = 1;
1658  s->h_scount[0] = 1;
1659  s->v_scount[0] = 1;
1660  }
1661 
1662  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1663  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1664  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1665  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1666  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1667 
1668 
1669  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1670  for (i = s->mjpb_skiptosod; i > 0; i--)
1671  skip_bits(&s->gb, 8);
1672 
1673 next_field:
1674  for (i = 0; i < nb_components; i++)
1675  s->last_dc[i] = (4 << s->bits);
1676 
1677  if (s->avctx->hwaccel) {
1678  int bytes_to_start = get_bits_count(&s->gb) / 8;
1679  av_assert0(bytes_to_start >= 0 &&
1680  s->raw_scan_buffer_size >= bytes_to_start);
1681 
1682  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1683  s->raw_scan_buffer + bytes_to_start,
1684  s->raw_scan_buffer_size - bytes_to_start);
1685  if (ret < 0)
1686  return ret;
1687 
1688  } else if (s->lossless) {
1689  av_assert0(s->picture_ptr == s->picture);
1690  if (CONFIG_JPEGLS_DECODER && s->ls) {
1691 // for () {
1692 // reset_ls_coding_parameters(s, 0);
1693 
1694  if ((ret = ff_jpegls_decode_picture(s, predictor,
1695  point_transform, ilv)) < 0)
1696  return ret;
1697  } else {
1698  if (s->rgb) {
1699  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1700  return ret;
1701  } else {
1702  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1703  point_transform,
1704  nb_components)) < 0)
1705  return ret;
1706  }
1707  }
1708  } else {
1709  if (s->progressive && predictor) {
1710  av_assert0(s->picture_ptr == s->picture);
1711  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1712  ilv, prev_shift,
1713  point_transform)) < 0)
1714  return ret;
1715  } else {
1716  if ((ret = mjpeg_decode_scan(s, nb_components,
1717  prev_shift, point_transform,
1718  mb_bitmask, mb_bitmask_size, reference)) < 0)
1719  return ret;
1720  }
1721  }
1722 
1723  if (s->interlaced &&
1724  get_bits_left(&s->gb) > 32 &&
1725  show_bits(&s->gb, 8) == 0xFF) {
1726  GetBitContext bak = s->gb;
1727  align_get_bits(&bak);
1728  if (show_bits(&bak, 16) == 0xFFD1) {
1729  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1730  s->gb = bak;
1731  skip_bits(&s->gb, 16);
1732  s->bottom_field ^= 1;
1733 
1734  goto next_field;
1735  }
1736  }
1737 
1738  emms_c();
1739  return 0;
1740  out_of_range:
1741  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1742  return AVERROR_INVALIDDATA;
1743 }
1744 
1745 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1746 {
1747  if (get_bits(&s->gb, 16) != 4)
1748  return AVERROR_INVALIDDATA;
1749  s->restart_interval = get_bits(&s->gb, 16);
1750  s->restart_count = 0;
1751  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1752  s->restart_interval);
1753 
1754  return 0;
1755 }
1756 
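/* mjpeg_decode_app(): parse APPn segments: AVI1/AVID field polarity, JFIF
 * sample aspect ratio and thumbnail, Adobe transform, Pegasus LJIF, colr and
 * xfrm atoms, VRex _JPS stereo information, EXIF/TIFF metadata and
 * multi-segment ICC profiles. Unknown or over-read data is skipped at the
 * "out" label. */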
1757 static int mjpeg_decode_app(MJpegDecodeContext *s)
1758 {
1759  int len, id, i;
1760 
1761  len = get_bits(&s->gb, 16);
1762  if (len < 6)
1763  return AVERROR_INVALIDDATA;
1764  if (8 * len > get_bits_left(&s->gb))
1765  return AVERROR_INVALIDDATA;
1766 
1767  id = get_bits_long(&s->gb, 32);
1768  len -= 6;
1769 
1770  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1771  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1772  av_fourcc2str(av_bswap32(id)), id, len);
1773 
1774  /* Buggy AVID, it puts EOI only at every 10th frame. */
1775  /* Also, this fourcc is used by non-avid files too, it holds some
1776  information, but it's always present in AVID-created files. */
1777  if (id == AV_RB32("AVI1")) {
1778  /* structure:
1779  4bytes AVI1
1780  1bytes polarity
1781  1bytes always zero
1782  4bytes field_size
1783  4bytes field_size_less_padding
1784  */
1785  s->buggy_avid = 1;
1786  i = get_bits(&s->gb, 8); len--;
1787  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1788  goto out;
1789  }
1790 
1791  if (id == AV_RB32("JFIF")) {
1792  int t_w, t_h, v1, v2;
1793  if (len < 8)
1794  goto out;
1795  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1796  v1 = get_bits(&s->gb, 8);
1797  v2 = get_bits(&s->gb, 8);
1798  skip_bits(&s->gb, 8);
1799 
1800  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1801  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1802  if ( s->avctx->sample_aspect_ratio.num <= 0
1803  || s->avctx->sample_aspect_ratio.den <= 0) {
1804  s->avctx->sample_aspect_ratio.num = 0;
1805  s->avctx->sample_aspect_ratio.den = 1;
1806  }
1807 
1808  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1809  av_log(s->avctx, AV_LOG_INFO,
1810  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1811  v1, v2,
1812  s->avctx->sample_aspect_ratio.num,
1813  s->avctx->sample_aspect_ratio.den);
1814 
1815  len -= 8;
1816  if (len >= 2) {
1817  t_w = get_bits(&s->gb, 8);
1818  t_h = get_bits(&s->gb, 8);
1819  if (t_w && t_h) {
1820  /* skip thumbnail */
1821  if (len -10 - (t_w * t_h * 3) > 0)
1822  len -= t_w * t_h * 3;
1823  }
1824  len -= 2;
1825  }
1826  goto out;
1827  }
1828 
1829  if ( id == AV_RB32("Adob")
1830  && len >= 7
1831  && show_bits(&s->gb, 8) == 'e'
1832  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1833  skip_bits(&s->gb, 8); /* 'e' */
1834  skip_bits(&s->gb, 16); /* version */
1835  skip_bits(&s->gb, 16); /* flags0 */
1836  skip_bits(&s->gb, 16); /* flags1 */
1837  s->adobe_transform = get_bits(&s->gb, 8);
1838  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1839  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1840  len -= 7;
1841  goto out;
1842  }
1843 
1844  if (id == AV_RB32("LJIF")) {
1845  int rgb = s->rgb;
1846  int pegasus_rct = s->pegasus_rct;
1847  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1848  av_log(s->avctx, AV_LOG_INFO,
1849  "Pegasus lossless jpeg header found\n");
1850  skip_bits(&s->gb, 16); /* version ? */
1851  skip_bits(&s->gb, 16); /* unknown always 0? */
1852  skip_bits(&s->gb, 16); /* unknown always 0? */
1853  skip_bits(&s->gb, 16); /* unknown always 0? */
1854  switch (i=get_bits(&s->gb, 8)) {
1855  case 1:
1856  rgb = 1;
1857  pegasus_rct = 0;
1858  break;
1859  case 2:
1860  rgb = 1;
1861  pegasus_rct = 1;
1862  break;
1863  default:
1864  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1865  }
1866 
1867  len -= 9;
1868  if (s->got_picture)
1869  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1870  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1871  goto out;
1872  }
1873 
1874  s->rgb = rgb;
1875  s->pegasus_rct = pegasus_rct;
1876 
1877  goto out;
1878  }
1879  if (id == AV_RL32("colr") && len > 0) {
1880  s->colr = get_bits(&s->gb, 8);
1881  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1882  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1883  len --;
1884  goto out;
1885  }
1886  if (id == AV_RL32("xfrm") && len > 0) {
1887  s->xfrm = get_bits(&s->gb, 8);
1888  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1889  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1890  len --;
1891  goto out;
1892  }
1893 
1894  /* JPS extension by VRex */
1895  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1896  int flags, layout, type;
1897  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1898  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1899 
1900  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1901  skip_bits(&s->gb, 16); len -= 2; /* block length */
1902  skip_bits(&s->gb, 8); /* reserved */
1903  flags = get_bits(&s->gb, 8);
1904  layout = get_bits(&s->gb, 8);
1905  type = get_bits(&s->gb, 8);
1906  len -= 4;
1907 
1908  av_freep(&s->stereo3d);
1909  s->stereo3d = av_stereo3d_alloc();
1910  if (!s->stereo3d) {
1911  goto out;
1912  }
1913  if (type == 0) {
1914  s->stereo3d->type = AV_STEREO3D_2D;
1915  } else if (type == 1) {
1916  switch (layout) {
1917  case 0x01:
1918  s->stereo3d->type = AV_STEREO3D_LINES;
1919  break;
1920  case 0x02:
1921  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1922  break;
1923  case 0x03:
1924  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1925  break;
1926  }
1927  if (!(flags & 0x04)) {
1928  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1929  }
1930  }
1931  goto out;
1932  }
1933 
1934  /* EXIF metadata */
1935  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1936  GetByteContext gbytes;
1937  int ret, le, ifd_offset, bytes_read;
1938  const uint8_t *aligned;
1939 
1940  skip_bits(&s->gb, 16); // skip padding
1941  len -= 2;
1942 
1943  // init byte wise reading
1944  aligned = align_get_bits(&s->gb);
1945  bytestream2_init(&gbytes, aligned, len);
1946 
1947  // read TIFF header
1948  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
1949  if (ret) {
1950  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
1951  } else {
1952  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
1953 
1954  // read 0th IFD and store the metadata
1955  // (return values > 0 indicate the presence of subimage metadata)
1956  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
1957  if (ret < 0) {
1958  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
1959  }
1960  }
1961 
1962  bytes_read = bytestream2_tell(&gbytes);
1963  skip_bits(&s->gb, bytes_read << 3);
1964  len -= bytes_read;
1965 
1966  goto out;
1967  }
1968 
1969  /* Apple MJPEG-A */
1970  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
1971  id = get_bits_long(&s->gb, 32);
1972  len -= 4;
1973  /* Apple MJPEG-A */
1974  if (id == AV_RB32("mjpg")) {
1975  /* structure:
1976  4bytes field size
1977  4bytes pad field size
1978  4bytes next off
1979  4bytes quant off
1980  4bytes huff off
1981  4bytes image off
1982  4bytes scan off
1983  4bytes data off
1984  */
1985  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1986  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
1987  }
1988  }
1989 
1990  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
1991  int id2;
1992  unsigned seqno;
1993  unsigned nummarkers;
1994 
1995  id = get_bits_long(&s->gb, 32);
1996  id2 = get_bits_long(&s->gb, 24);
1997  len -= 7;
1998  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
1999  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2000  goto out;
2001  }
2002 
2003  skip_bits(&s->gb, 8);
2004  seqno = get_bits(&s->gb, 8);
2005  len -= 2;
2006  if (seqno == 0) {
2007  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2008  goto out;
2009  }
2010 
2011  nummarkers = get_bits(&s->gb, 8);
2012  len -= 1;
2013  if (nummarkers == 0) {
2014  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2015  goto out;
2016  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2017  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2018  goto out;
2019  } else if (seqno > nummarkers) {
2020  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2021  goto out;
2022  }
2023 
2024  /* Allocate if this is the first APP2 we've seen. */
2025  if (s->iccnum == 0) {
2026  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2027  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2028  if (!s->iccdata || !s->iccdatalens) {
2029  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2030  return AVERROR(ENOMEM);
2031  }
2032  s->iccnum = nummarkers;
2033  }
2034 
2035  if (s->iccdata[seqno - 1]) {
2036  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2037  goto out;
2038  }
2039 
2040  s->iccdatalens[seqno - 1] = len;
2041  s->iccdata[seqno - 1] = av_malloc(len);
2042  if (!s->iccdata[seqno - 1]) {
2043  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2044  return AVERROR(ENOMEM);
2045  }
2046 
2047  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2048  skip_bits(&s->gb, len << 3);
2049  len = 0;
2050  s->iccread++;
2051 
2052  if (s->iccread > s->iccnum)
2053  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2054  }
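 /* An ICC profile larger than one marker segment is split across several
  * APP2 chunks, each carrying its 1-based sequence number and the total
  * chunk count; the pieces collected in iccdata[]/iccdatalens[] are
  * concatenated into AV_FRAME_DATA_ICC_PROFILE side data once every chunk
  * has been read (see the end of ff_mjpeg_decode_frame below). */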
2055 
2056 out:
2057  /* slow but needed for extreme adobe jpegs */
2058  if (len < 0)
2059  av_log(s->avctx, AV_LOG_ERROR,
2060  "mjpeg: error, decode_app parser read over the end\n");
2061  while (--len > 0)
2062  skip_bits(&s->gb, 8);
2063 
2064  return 0;
2065 }
2066 
2067 static int mjpeg_decode_com(MJpegDecodeContext *s)
2068 {
2069  int len = get_bits(&s->gb, 16);
2070  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2071  int i;
2072  char *cbuf = av_malloc(len - 1);
2073  if (!cbuf)
2074  return AVERROR(ENOMEM);
2075 
2076  for (i = 0; i < len - 2; i++)
2077  cbuf[i] = get_bits(&s->gb, 8);
2078  if (i > 0 && cbuf[i - 1] == '\n')
2079  cbuf[i - 1] = 0;
2080  else
2081  cbuf[i] = 0;
2082 
2083  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2084  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2085 
2086  /* buggy avid, it puts EOI only at every 10th frame */
2087  if (!strncmp(cbuf, "AVID", 4)) {
2088  parse_avid(s, cbuf, len);
2089  } else if (!strcmp(cbuf, "CS=ITU601"))
2090  s->cs_itu601 = 1;
2091  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2092  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2093  s->flipped = 1;
2094  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2095  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2096  s->multiscope = 2;
2097  }
2098 
2099  av_free(cbuf);
2100  }
2101 
2102  return 0;
2103 }
2104 
2105 /* return the 8 bit start code value and update the search
2106  state. Return -1 if no start code found */
2107 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2108 {
2109  const uint8_t *buf_ptr;
2110  unsigned int v, v2;
2111  int val;
2112  int skipped = 0;
2113 
2114  buf_ptr = *pbuf_ptr;
2115  while (buf_end - buf_ptr > 1) {
2116  v = *buf_ptr++;
2117  v2 = *buf_ptr;
2118  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2119  val = *buf_ptr++;
2120  goto found;
2121  }
2122  skipped++;
2123  }
2124  buf_ptr = buf_end;
2125  val = -1;
2126 found:
2127  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2128  *pbuf_ptr = buf_ptr;
2129  return val;
2130 }
2131 
2132 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2133  const uint8_t **buf_ptr, const uint8_t *buf_end,
2134  const uint8_t **unescaped_buf_ptr,
2135  int *unescaped_buf_size)
2136 {
2137  int start_code;
2138  start_code = find_marker(buf_ptr, buf_end);
2139 
2140  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2141  if (!s->buffer)
2142  return AVERROR(ENOMEM);
2143 
2144  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2145  if (start_code == SOS && !s->ls) {
2146  const uint8_t *src = *buf_ptr;
2147  const uint8_t *ptr = src;
2148  uint8_t *dst = s->buffer;
2149 
2150  #define copy_data_segment(skip) do { \
2151  ptrdiff_t length = (ptr - src) - (skip); \
2152  if (length > 0) { \
2153  memcpy(dst, src, length); \
2154  dst += length; \
2155  src = ptr; \
2156  } \
2157  } while (0)
2158 
2159  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2160  ptr = buf_end;
2161  copy_data_segment(0);
2162  } else {
2163  while (ptr < buf_end) {
2164  uint8_t x = *(ptr++);
2165 
2166  if (x == 0xff) {
2167  ptrdiff_t skip = 0;
2168  while (ptr < buf_end && x == 0xff) {
2169  x = *(ptr++);
2170  skip++;
2171  }
2172 
2173  /* 0xFF, 0xFF, ... */
2174  if (skip > 1) {
2175  copy_data_segment(skip);
2176 
2177  /* decrement src as it is equal to ptr after the
2178  * copy_data_segment macro and we might want to
2179  * copy the current value of x later on */
2180  src--;
2181  }
2182 
2183  if (x < RST0 || x > RST7) {
2184  copy_data_segment(1);
2185  if (x)
2186  break;
2187  }
2188  }
2189  }
2190  if (src < ptr)
2191  copy_data_segment(0);
2192  }
2193  #undef copy_data_segment
2194 
2195  *unescaped_buf_ptr = s->buffer;
2196  *unescaped_buf_size = dst - s->buffer;
2197  memset(s->buffer + *unescaped_buf_size, 0,
2198  AV_INPUT_BUFFER_PADDING_SIZE);
2199 
2200  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2201  (buf_end - *buf_ptr) - (dst - s->buffer));
2202  } else if (start_code == SOS && s->ls) {
2203  const uint8_t *src = *buf_ptr;
2204  uint8_t *dst = s->buffer;
2205  int bit_count = 0;
2206  int t = 0, b = 0;
2207  PutBitContext pb;
2208 
2209  /* find marker */
2210  while (src + t < buf_end) {
2211  uint8_t x = src[t++];
2212  if (x == 0xff) {
2213  while ((src + t < buf_end) && x == 0xff)
2214  x = src[t++];
2215  if (x & 0x80) {
2216  t -= FFMIN(2, t);
2217  break;
2218  }
2219  }
2220  }
2221  bit_count = t * 8;
2222  init_put_bits(&pb, dst, t);
2223 
2224  /* unescape bitstream */
2225  while (b < t) {
2226  uint8_t x = src[b++];
2227  put_bits(&pb, 8, x);
2228  if (x == 0xFF && b < t) {
2229  x = src[b++];
2230  if (x & 0x80) {
2231  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2232  x &= 0x7f;
2233  }
2234  put_bits(&pb, 7, x);
2235  bit_count--;
2236  }
2237  }
2238  flush_put_bits(&pb);
2239 
2240  *unescaped_buf_ptr = dst;
2241  *unescaped_buf_size = (bit_count + 7) >> 3;
2242  memset(s->buffer + *unescaped_buf_size, 0,
2243  AV_INPUT_BUFFER_PADDING_SIZE);
2244  } else {
2245  *unescaped_buf_ptr = *buf_ptr;
2246  *unescaped_buf_size = buf_end - *buf_ptr;
2247  }
2248 
2249  return start_code;
2250 }
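 /* The unescaping above undoes JPEG byte stuffing: inside entropy-coded
  * data, an 0xFF byte is followed by a stuffed 0x00 (or by an RST0..RST7
  * restart marker, which is kept). For example the escaped sequence
  *   0x12 0xFF 0x00 0x34
  * becomes
  *   0x12 0xFF 0x34
  * in the unescaped buffer handed to the scan decoder. */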
2251 
2252 static void reset_icc_profile(MJpegDecodeContext *s)
2253 {
2254  int i;
2255 
2256  if (s->iccdata)
2257  for (i = 0; i < s->iccnum; i++)
2258  av_freep(&s->iccdata[i]);
2259  av_freep(&s->iccdata);
2260  av_freep(&s->iccdatalens);
2261 
2262  s->iccread = 0;
2263  s->iccnum = 0;
2264 }
2265 
2266 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2267  AVPacket *avpkt)
2268 {
2269  AVFrame *frame = data;
2270  const uint8_t *buf = avpkt->data;
2271  int buf_size = avpkt->size;
2272  MJpegDecodeContext *s = avctx->priv_data;
2273  const uint8_t *buf_end, *buf_ptr;
2274  const uint8_t *unescaped_buf_ptr;
2275  int hshift, vshift;
2276  int unescaped_buf_size;
2277  int start_code;
2278  int i, index;
2279  int ret = 0;
2280  int is16bit;
2281 
2282  s->buf_size = buf_size;
2283 
2284  av_dict_free(&s->exif_metadata);
2285  av_freep(&s->stereo3d);
2286  s->adobe_transform = -1;
2287 
2288  if (s->iccnum != 0)
2289  reset_icc_profile(s);
2290 
2291  buf_ptr = buf;
2292  buf_end = buf + buf_size;
2293  while (buf_ptr < buf_end) {
2294  /* find start next marker */
2295  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2296  &unescaped_buf_ptr,
2297  &unescaped_buf_size);
2298  /* EOF */
2299  if (start_code < 0) {
2300  break;
2301  } else if (unescaped_buf_size > INT_MAX / 8) {
2302  av_log(avctx, AV_LOG_ERROR,
2303  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2304  start_code, unescaped_buf_size, buf_size);
2305  return AVERROR_INVALIDDATA;
2306  }
2307  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2308  start_code, buf_end - buf_ptr);
2309 
2310  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2311 
2312  if (ret < 0) {
2313  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2314  goto fail;
2315  }
2316 
2317  s->start_code = start_code;
2318  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2319  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2320 
2321  /* process markers */
2322  if (start_code >= RST0 && start_code <= RST7) {
2323  av_log(avctx, AV_LOG_DEBUG,
2324  "restart marker: %d\n", start_code & 0x0f);
2325  /* APP fields */
2326  } else if (start_code >= APP0 && start_code <= APP15) {
2327  if ((ret = mjpeg_decode_app(s)) < 0)
2328  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2329  av_err2str(ret));
2330  /* Comment */
2331  } else if (start_code == COM) {
2332  ret = mjpeg_decode_com(s);
2333  if (ret < 0)
2334  return ret;
2335  } else if (start_code == DQT) {
2336  ret = ff_mjpeg_decode_dqt(s);
2337  if (ret < 0)
2338  return ret;
2339  }
2340 
2341  ret = -1;
2342 
2343  if (!CONFIG_JPEGLS_DECODER &&
2344  (start_code == SOF48 || start_code == LSE)) {
2345  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2346  return AVERROR(ENOSYS);
2347  }
2348 
2349  if (avctx->skip_frame == AVDISCARD_ALL) {
2350  switch(start_code) {
2351  case SOF0:
2352  case SOF1:
2353  case SOF2:
2354  case SOF3:
2355  case SOF48:
2356  case SOI:
2357  case SOS:
2358  case EOI:
2359  break;
2360  default:
2361  goto skip;
2362  }
2363  }
2364 
2365  switch (start_code) {
2366  case SOI:
2367  s->restart_interval = 0;
2368  s->restart_count = 0;
2369  s->raw_image_buffer = buf_ptr;
2370  s->raw_image_buffer_size = buf_end - buf_ptr;
2371  /* nothing to do on SOI */
2372  break;
2373  case DHT:
2374  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2375  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2376  goto fail;
2377  }
2378  break;
2379  case SOF0:
2380  case SOF1:
2381  if (start_code == SOF0)
2382  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2383  else
2384  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2385  s->lossless = 0;
2386  s->ls = 0;
2387  s->progressive = 0;
2388  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2389  goto fail;
2390  break;
2391  case SOF2:
2392  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2393  s->lossless = 0;
2394  s->ls = 0;
2395  s->progressive = 1;
2396  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2397  goto fail;
2398  break;
2399  case SOF3:
2400  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2401  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2402  s->lossless = 1;
2403  s->ls = 0;
2404  s->progressive = 0;
2405  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2406  goto fail;
2407  break;
2408  case SOF48:
2409  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2410  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2411  s->lossless = 1;
2412  s->ls = 1;
2413  s->progressive = 0;
2414  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2415  goto fail;
2416  break;
2417  case LSE:
2418  if (!CONFIG_JPEGLS_DECODER ||
2419  (ret = ff_jpegls_decode_lse(s)) < 0)
2420  goto fail;
2421  break;
2422  case EOI:
2423 eoi_parser:
2424  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2425  s->progressive && s->cur_scan && s->got_picture)
2426  mjpeg_idct_scan_progressive_ac(s);
2427  s->cur_scan = 0;
2428  if (!s->got_picture) {
2429  av_log(avctx, AV_LOG_WARNING,
2430  "Found EOI before any SOF, ignoring\n");
2431  break;
2432  }
2433  if (s->interlaced) {
2434  s->bottom_field ^= 1;
2435  /* if not bottom field, do not output image yet */
2436  if (s->bottom_field == !s->interlace_polarity)
2437  break;
2438  }
2439  if (avctx->skip_frame == AVDISCARD_ALL) {
2440  s->got_picture = 0;
2441  goto the_end_no_picture;
2442  }
2443  if (s->avctx->hwaccel) {
2444  ret = s->avctx->hwaccel->end_frame(s->avctx);
2445  if (ret < 0)
2446  return ret;
2447 
2448  av_freep(&s->hwaccel_picture_private);
2449  }
2450  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2451  return ret;
2452  *got_frame = 1;
2453  s->got_picture = 0;
2454 
2455  if (!s->lossless) {
2456  int qp = FFMAX3(s->qscale[0],
2457  s->qscale[1],
2458  s->qscale[2]);
2459  int qpw = (s->width + 15) / 16;
2460  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2461  if (qp_table_buf) {
2462  memset(qp_table_buf->data, qp, qpw);
2463  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2464  }
2465 
2466  if(avctx->debug & FF_DEBUG_QP)
2467  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2468  }
2469 
2470  goto the_end;
2471  case SOS:
2472  s->raw_scan_buffer = buf_ptr;
2473  s->raw_scan_buffer_size = buf_end - buf_ptr;
2474 
2475  s->cur_scan++;
2476  if (avctx->skip_frame == AVDISCARD_ALL) {
2477  skip_bits(&s->gb, get_bits_left(&s->gb));
2478  break;
2479  }
2480 
2481  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2482  (avctx->err_recognition & AV_EF_EXPLODE))
2483  goto fail;
2484  break;
2485  case DRI:
2486  if ((ret = mjpeg_decode_dri(s)) < 0)
2487  return ret;
2488  break;
2489  case SOF5:
2490  case SOF6:
2491  case SOF7:
2492  case SOF9:
2493  case SOF10:
2494  case SOF11:
2495  case SOF13:
2496  case SOF14:
2497  case SOF15:
2498  case JPG:
2499  av_log(avctx, AV_LOG_ERROR,
2500  "mjpeg: unsupported coding type (%x)\n", start_code);
2501  break;
2502  }
2503 
2504 skip:
2505  /* eof process start code */
2506  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2507  av_log(avctx, AV_LOG_DEBUG,
2508  "marker parser used %d bytes (%d bits)\n",
2509  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2510  }
2511  if (s->got_picture && s->cur_scan) {
2512  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2513  goto eoi_parser;
2514  }
2515  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2516  return AVERROR_INVALIDDATA;
2517 fail:
2518  s->got_picture = 0;
2519  return ret;
2520 the_end:
2521 
2522  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2523 
2524  if (AV_RB32(s->upscale_h)) {
2525  int p;
2526  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2527  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2528  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2529  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2530  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2531  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2532  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2533  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2534  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2535  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2536  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2537  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2538  );
2539  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2540  if (ret)
2541  return ret;
2542 
2543  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2544  for (p = 0; p<s->nb_components; p++) {
2545  uint8_t *line = s->picture_ptr->data[p];
2546  int w = s->width;
2547  int h = s->height;
2548  if (!s->upscale_h[p])
2549  continue;
2550  if (p==1 || p==2) {
2551  w = AV_CEIL_RSHIFT(w, hshift);
2552  h = AV_CEIL_RSHIFT(h, vshift);
2553  }
2554  if (s->upscale_v[p] == 1)
2555  h = (h+1)>>1;
2556  av_assert0(w > 0);
2557  for (i = 0; i < h; i++) {
2558  if (s->upscale_h[p] == 1) {
2559  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2560  else line[w - 1] = line[(w - 1) / 2];
2561  for (index = w - 2; index > 0; index--) {
2562  if (is16bit)
2563  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2564  else
2565  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2566  }
2567  } else if (s->upscale_h[p] == 2) {
2568  if (is16bit) {
2569  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2570  if (w > 1)
2571  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2572  } else {
2573  line[w - 1] = line[(w - 1) / 3];
2574  if (w > 1)
2575  line[w - 2] = line[w - 1];
2576  }
2577  for (index = w - 3; index > 0; index--) {
2578  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2579  }
2580  }
2581  line += s->linesize[p];
2582  }
2583  }
2584  }
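 /* The horizontal upsampling above runs right-to-left so it can work in
  * place: for a 2x factor each output sample is the average of its two
  * nearest subsampled neighbours, for a 3x factor the average of three,
  * with the rightmost column(s) simply replicated. */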
2585  if (AV_RB32(s->upscale_v)) {
2586  int p;
2587  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2588  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2589  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2590  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2592  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2593  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2594  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2595  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2596  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2597  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2598  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2599  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2600  );
2601  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2602  if (ret)
2603  return ret;
2604 
2605  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2606  for (p = 0; p < s->nb_components; p++) {
2607  uint8_t *dst;
2608  int w = s->width;
2609  int h = s->height;
2610  if (!s->upscale_v[p])
2611  continue;
2612  if (p==1 || p==2) {
2613  w = AV_CEIL_RSHIFT(w, hshift);
2614  h = AV_CEIL_RSHIFT(h, vshift);
2615  }
2616  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2617  for (i = h - 1; i; i--) {
2618  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2619  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2620  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2621  memcpy(dst, src1, w);
2622  } else {
2623  for (index = 0; index < w; index++)
2624  dst[index] = (src1[index] + src2[index]) >> 1;
2625  }
2626  dst -= s->linesize[p];
2627  }
2628  }
2629  }
2630  if (s->flipped && !s->rgb) {
2631  int j;
2632  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2633  if (ret)
2634  return ret;
2635 
2636  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2637  for (index=0; index<s->nb_components; index++) {
2638  uint8_t *dst = s->picture_ptr->data[index];
2639  int w = s->picture_ptr->width;
2640  int h = s->picture_ptr->height;
2641  if(index && index<3){
2642  w = AV_CEIL_RSHIFT(w, hshift);
2643  h = AV_CEIL_RSHIFT(h, vshift);
2644  }
2645  if(dst){
2646  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2647  for (i=0; i<h/2; i++) {
2648  for (j=0; j<w; j++)
2649  FFSWAP(int, dst[j], dst2[j]);
2650  dst += s->picture_ptr->linesize[index];
2651  dst2 -= s->picture_ptr->linesize[index];
2652  }
2653  }
2654  }
2655  }
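 /* The two blocks below post-process 4-component Adobe (APP14) images:
  * for transform 0 the three colour planes are weighted by the fourth (K)
  * plane, rescaled to 8 bits and reordered into G/B/R order; for
  * transform 2 luma is mirrored about 255 and chroma about the 128
  * midpoint before the same K weighting; in both cases the alpha plane is
  * forced to opaque. */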
2656  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2657  int w = s->picture_ptr->width;
2658  int h = s->picture_ptr->height;
2659  av_assert0(s->nb_components == 4);
2660  for (i=0; i<h; i++) {
2661  int j;
2662  uint8_t *dst[4];
2663  for (index=0; index<4; index++) {
2664  dst[index] = s->picture_ptr->data[index]
2665  + s->picture_ptr->linesize[index]*i;
2666  }
2667  for (j=0; j<w; j++) {
2668  int k = dst[3][j];
2669  int r = dst[0][j] * k;
2670  int g = dst[1][j] * k;
2671  int b = dst[2][j] * k;
2672  dst[0][j] = g*257 >> 16;
2673  dst[1][j] = b*257 >> 16;
2674  dst[2][j] = r*257 >> 16;
2675  dst[3][j] = 255;
2676  }
2677  }
2678  }
2679  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2680  int w = s->picture_ptr->width;
2681  int h = s->picture_ptr->height;
2682  av_assert0(s->nb_components == 4);
2683  for (i=0; i<h; i++) {
2684  int j;
2685  uint8_t *dst[4];
2686  for (index=0; index<4; index++) {
2687  dst[index] = s->picture_ptr->data[index]
2688  + s->picture_ptr->linesize[index]*i;
2689  }
2690  for (j=0; j<w; j++) {
2691  int k = dst[3][j];
2692  int r = (255 - dst[0][j]) * k;
2693  int g = (128 - dst[1][j]) * k;
2694  int b = (128 - dst[2][j]) * k;
2695  dst[0][j] = r*257 >> 16;
2696  dst[1][j] = (g*257 >> 16) + 128;
2697  dst[2][j] = (b*257 >> 16) + 128;
2698  dst[3][j] = 255;
2699  }
2700  }
2701  }
2702 
2703  if (s->stereo3d) {
2704  AVStereo3D *stereo = av_stereo3d_create_side_data(data);
2705  if (stereo) {
2706  stereo->type = s->stereo3d->type;
2707  stereo->flags = s->stereo3d->flags;
2708  }
2709  av_freep(&s->stereo3d);
2710  }
2711 
2712  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2713  AVFrameSideData *sd;
2714  size_t offset = 0;
2715  int total_size = 0;
2716  int i;
2717 
2718  /* Sum size of all parts. */
2719  for (i = 0; i < s->iccnum; i++)
2720  total_size += s->iccdatalens[i];
2721 
2722  sd = av_frame_new_side_data(data, AV_FRAME_DATA_ICC_PROFILE, total_size);
2723  if (!sd) {
2724  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2725  return AVERROR(ENOMEM);
2726  }
2727 
2728  /* Reassemble the parts, which are now in-order. */
2729  for (i = 0; i < s->iccnum; i++) {
2730  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2731  offset += s->iccdatalens[i];
2732  }
2733  }
2734 
2735  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
2736  av_dict_free(&s->exif_metadata);
2737 
2738 the_end_no_picture:
2739  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2740  buf_end - buf_ptr);
2741 // return buf_end - buf_ptr;
2742  return buf_ptr - buf;
2743 }
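 /* ff_mjpeg_decode_frame() is marker driven: SOI resets the restart state,
  * DQT/DHT install quantization and Huffman tables, SOFn selects the
  * profile and sets up the picture, each SOS decodes one scan, and EOI
  * (or a missing EOI, which is emulated) finishes the frame, applies the
  * upscaling/flipping fixups above and returns the number of consumed
  * bytes. */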
2744 
2745 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2746 {
2747  MJpegDecodeContext *s = avctx->priv_data;
2748  int i, j;
2749 
2750  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2751  av_log(avctx, AV_LOG_INFO, "Single field\n");
2752  }
2753 
2754  if (s->picture) {
2755  av_frame_free(&s->picture);
2756  s->picture_ptr = NULL;
2757  } else if (s->picture_ptr)
2758  av_frame_unref(s->picture_ptr);
2759 
2760  av_freep(&s->buffer);
2761  av_freep(&s->stereo3d);
2762  av_freep(&s->ljpeg_buffer);
2763  s->ljpeg_buffer_size = 0;
2764 
2765  for (i = 0; i < 3; i++) {
2766  for (j = 0; j < 4; j++)
2767  ff_free_vlc(&s->vlcs[i][j]);
2768  }
2769  for (i = 0; i < MAX_COMPONENTS; i++) {
2770  av_freep(&s->blocks[i]);
2771  av_freep(&s->last_nnz[i]);
2772  }
2773  av_dict_free(&s->exif_metadata);
2774 
2775  reset_icc_profile(s);
2776 
2777  av_freep(&s->hwaccel_picture_private);
2778 
2779  return 0;
2780 }
2781 
2782 static void decode_flush(AVCodecContext *avctx)
2783 {
2784  MJpegDecodeContext *s = avctx->priv_data;
2785  s->got_picture = 0;
2786 }
2787 
2788 #if CONFIG_MJPEG_DECODER
2789 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2790 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2791 static const AVOption options[] = {
2792  { "extern_huff", "Use external huffman table.",
2793  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2794  { NULL },
2795 };
2796 
2797 static const AVClass mjpegdec_class = {
2798  .class_name = "MJPEG decoder",
2799  .item_name = av_default_item_name,
2800  .option = options,
2801  .version = LIBAVUTIL_VERSION_INT,
2802 };
2803 
2804 AVCodec ff_mjpeg_decoder = {
2805  .name = "mjpeg",
2806  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2807  .type = AVMEDIA_TYPE_VIDEO,
2808  .id = AV_CODEC_ID_MJPEG,
2809  .priv_data_size = sizeof(MJpegDecodeContext),
2810  .init = ff_mjpeg_decode_init,
2811  .close = ff_mjpeg_decode_end,
2812  .decode = ff_mjpeg_decode_frame,
2813  .flush = decode_flush,
2814  .capabilities = AV_CODEC_CAP_DR1,
2815  .max_lowres = 3,
2816  .priv_class = &mjpegdec_class,
2817  .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2818  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2819  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2820  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2821 #if CONFIG_MJPEG_NVDEC_HWACCEL
2822  HWACCEL_NVDEC(mjpeg),
2823 #endif
2824 #if CONFIG_MJPEG_VAAPI_HWACCEL
2825  HWACCEL_VAAPI(mjpeg),
2826 #endif
2827  NULL
2828  },
2829 };
2830 #endif
2831 #if CONFIG_THP_DECODER
2832 AVCodec ff_thp_decoder = {
2833  .name = "thp",
2834  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2835  .type = AVMEDIA_TYPE_VIDEO,
2836  .id = AV_CODEC_ID_THP,
2837  .priv_data_size = sizeof(MJpegDecodeContext),
2838  .init = ff_mjpeg_decode_init,
2839  .close = ff_mjpeg_decode_end,
2840  .decode = ff_mjpeg_decode_frame,
2841  .flush = decode_flush,
2842  .capabilities = AV_CODEC_CAP_DR1,
2843  .max_lowres = 3,
2844  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2845 };
2846 #endif
int block_stride[MAX_COMPONENTS]
Definition: mjpegdec.h:85
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:54
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1371
const struct AVCodec * codec
Definition: avcodec.h:1574
const char const char void * val
Definition: avisynth_c.h:863
const AVPixFmtDescriptor * pix_desc
!< stereoscopic information (cached, since it is read before frame allocation)
Definition: mjpegdec.h:135
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
Definition: mjpeg.h:81
int v_count[MAX_COMPONENTS]
Definition: mjpegdec.h:88
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:263
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
enum AVPixelFormat hwaccel_sw_pix_fmt
Definition: mjpegdec.h:151
Definition: mjpeg.h:71
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
Definition: mjpeg.h:111
Definition: mjpeg.h:73
float re
Definition: fft.c:82
Definition: mjpeg.h:40
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
Definition: mjpeg.h:42
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:377
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:502
size_t raw_image_buffer_size
Definition: mjpegdec.h:144
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:36
#define avpriv_request_sample(...)
int h_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:93
BlockDSPContext bdsp
Definition: mjpegdec.h:110
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2067
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2200
TIFF constants & data structures.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
Numerator.
Definition: rational.h:59
int qscale[4]
quantizer scale calculated from quant_matrixes
Definition: mjpegdec.h:58
int size
Definition: avcodec.h:1478
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
uint8_t * buffer
Definition: mjpegdec.h:54
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1944
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
#define copy_data_segment(skip)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
GLint GLenum type
Definition: opengl_enc.c:104
Definition: mjpeg.h:68
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
int dc_index[MAX_COMPONENTS]
Definition: mjpegdec.h:90
Definition: mjpeg.h:75
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
Definition: mjpeg.h:53
int linesize[MAX_COMPONENTS]
linesize << interlaced
Definition: mjpegdec.h:102
discard all
Definition: avcodec.h:811
uint8_t permutated[64]
Definition: idctdsp.h:33
Views are next to each other.
Definition: stereo3d.h:67
uint8_t upscale_v[4]
Definition: mjpegdec.h:69
uint8_t run
Definition: svq3.c:206
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2796
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:779
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2729
#define src
Definition: vp8dsp.c:254
int profile
profile
Definition: avcodec.h:2898
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
AVCodec.
Definition: avcodec.h:3481
EXIF metadata parser.
JPEG-LS decoder.
MJPEG encoder and decoder.
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:2997
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
int comp_index[MAX_COMPONENTS]
Definition: mjpegdec.h:89
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2252
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1532
HpelDSPContext hdsp
Definition: mjpegdec.h:111
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1688
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:2995
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3040
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
const uint8_t * raw_image_buffer
Definition: mjpegdec.h:143
int16_t block[64]
Definition: mjpegdec.h:104
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
Definition: mjpeg.h:72
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1745
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
uint16_t(* ljpeg_buffer)[4]
Definition: mjpegdec.h:127
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Definition: mjpeg.h:46
unsigned int ljpeg_buffer_size
Definition: mjpegdec.h:128
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:3228
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:2999
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
Definition: mjpeg.h:54
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
uint8_t * last_nnz[MAX_COMPONENTS]
Definition: mjpegdec.h:106
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
AVFrame * picture_ptr
Definition: mjpegdec.h:100
Structure to hold side data for an AVFrame.
Definition: frame.h:201
#define height
uint8_t * data
Definition: avcodec.h:1477
int quant_sindex[MAX_COMPONENTS]
Definition: mjpegdec.h:95
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
Definition: pixfmt.h:100
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:2998
int h_count[MAX_COMPONENTS]
Definition: mjpegdec.h:87
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:378
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2804
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
ptrdiff_t size
Definition: opengl_enc.c:100
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2207
#define av_log(a,...)
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
static int aligned(int val)
Definition: dashdec.c:178
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2266
enum AVCodecID id
Definition: avcodec.h:3495
AVDictionary * exif_metadata
Definition: mjpegdec.h:131
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:846
uint8_t ** iccdata
Definition: mjpegdec.h:137
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1010
static const uint16_t mask[17]
Definition: lzw.c:38
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:945
#define PTRDIFF_SPECIFIER
Definition: internal.h:261
int nb_blocks[MAX_COMPONENTS]
Definition: mjpegdec.h:92
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:523
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2745
VLC vlcs[3][4]
Definition: mjpegdec.h:57
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your local see the OFFSET() macro
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
Definition: graph2dot.c:48
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
uint8_t bits
Definition: vp3data.h:202
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1359
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2107
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:120
Definition: mjpeg.h:39
Definition: mjpeg.h:70
Definition: vlc.h:26
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
JPEG-LS.
Definition: mjpeg.h:103
Definition: mjpeg.h:79
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
ScanTable scantable
Definition: mjpegdec.h:109
Definition: mjpeg.h:80
#define b
Definition: input.c:41
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1328
Definition: mjpeg.h:56
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2694
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
Definition: mjpeg.h:44
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
uint8_t interlaced
Definition: mxfenc.c:2217
#define width
int component_id[MAX_COMPONENTS]
Definition: mjpegdec.h:86
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1757
#define NEG_USR32(a, s)
Definition: mathops.h:166
uint8_t w
Definition: llviddspenc.c:38
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
uint8_t raw_huffman_lengths[2][4][16]
Definition: mjpegdec.h:148
Definition: mjpeg.h:41
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:2996
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int quant_index[4]
Definition: mjpegdec.h:97
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int v_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:94
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2705
int n
Definition: avisynth_c.h:760
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
GetBitContext gb
Definition: mjpegdec.h:49
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
HW acceleration through CUDA.
Definition: pixfmt.h:235
#define ZERO_RUN
Definition: mjpegdec.c:927
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
#define FF_ARRAY_ELEMS(a)
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
int bits
Definition: vlc.h:27
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
AVCodec ff_mjpeg_decoder
IDCTDSPContext idsp
Definition: mjpegdec.h:112
#define src1
Definition: h264pred.c:139
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define av_bswap32
Definition: bswap.h:33
Libavcodec external API header.
Views are on top of each other.
Definition: stereo3d.h:79
Definition: mjpeg.h:52
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:87
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3750
enum AVCodecID codec_id
Definition: avcodec.h:1575
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
int debug
debug
Definition: avcodec.h:2650
AVStereo3D * stereo3d
Definition: mjpegdec.h:133
main external API structure.
Definition: avcodec.h:1565
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new 
state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
uint8_t * data
The data buffer.
Definition: buffer.h:89
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1590
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1964
uint8_t * data
Definition: frame.h:203
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:766
int extradata_size
Definition: avcodec.h:1667
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:346
int coded_height
Definition: avcodec.h:1753
Describe the class of an AVClass context structure.
Definition: log.h:67
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static const AVProfile profiles[]
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
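A minimal sketch of attaching a payload to a decoded frame with av_frame_new_side_data(); the side-data type (AV_FRAME_DATA_ICC_PROFILE) and the blob are assumptions made for illustration only.

#include <string.h>
#include "libavutil/error.h"
#include "libavutil/frame.h"

/* Copy an opaque blob into newly allocated frame side data. */
static int attach_profile(AVFrame *frame, const uint8_t *blob, int size)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, size);
    if (!sd)
        return AVERROR(ENOMEM);            /* allocation failed */
    memcpy(sd->data, blob, size);
    return 0;
}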
int index
Definition: gxfenc.c:89
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:761
int ac_index[MAX_COMPONENTS]
Definition: mjpegdec.h:91
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2193
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1044
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
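A hedged sketch of the bit-reader pattern the get_bits.h entries above describe (init_get_bits(), get_bits1(), skip_bits(), get_bits_long()); the field layout parsed here is invented for illustration.

#include "get_bits.h"

/* Parse a made-up header: one flag bit, three reserved bits, a 20-bit value. */
static int parse_example(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    unsigned flag, value;
    int ret;

    ret = init_get_bits(&gb, buf, buf_size * 8);  /* size is given in bits */
    if (ret < 0)
        return ret;

    flag  = get_bits1(&gb);             /* one flag bit */
    skip_bits(&gb, 3);                  /* reserved bits, ignored */
    value = get_bits_long(&gb, 20);     /* get_bits_long() handles 0-32 bit fields */

    return flag ? (int)value : 0;
}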
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
Definition: mjpeg.h:45
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
Definition: mjpegdec.h:107
Definition: mjpeg.h:48
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
enum AVPixelFormat hwaccel_pix_fmt
Definition: mjpegdec.h:152
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
uint8_t raw_huffman_values[2][4][256]
Definition: mjpegdec.h:149
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1480
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
#define MIN_CACHE_BITS
Definition: get_bits.h:128
Definition: mjpeg.h:47
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
Definition: mjpeg.h:104
#define flags(name, subs,...)
Definition: cbs_av1.c:561
size_t raw_scan_buffer_size
Definition: mjpegdec.h:146
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3739
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
uint8_t level
Definition: svq3.c:207
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1567
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:521
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3711
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:828
Definition: mjpeg.h:94
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:164
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1174
A reference to a data buffer.
Definition: buffer.h:81
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
const OptionDef options[]
Definition: ffmpeg_opt.c:3364
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: internal.h:60
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define FF_DEBUG_QP
Definition: avcodec.h:2655
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:3227
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
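A short sketch of the matching bit-writer usage (init_put_bits(), put_bits(), flush_put_bits()); the fields written are purely illustrative.

#include "put_bits.h"

/* Write a 4-bit marker and a 12-bit length into out[], then pad to a byte boundary. */
static int write_example(uint8_t *out, int out_size, unsigned length)
{
    PutBitContext pb;

    init_put_bits(&pb, out, out_size);    /* out_size is in bytes */
    put_bits(&pb, 4, 0xD);                /* hypothetical marker */
    put_bits(&pb, 12, length & 0xFFF);    /* 12-bit length field */
    flush_put_bits(&pb);                  /* zero-pad the trailing bits */

    return put_bits_count(&pb) >> 3;      /* number of bytes written */
}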
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:790
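A hedged sketch of the padding rule this constant implies when handing a decoder a buffer you allocated yourself; the helper name is illustrative.

#include <string.h>
#include "libavutil/mem.h"
#include "avcodec.h"

/* Copy raw bitstream into a freshly allocated, zero-padded buffer. */
static uint8_t *alloc_padded_copy(const uint8_t *src, int size)
{
    uint8_t *dst = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!dst)
        return NULL;
    memcpy(dst, src, size);
    memset(dst + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);  /* padding must be zeroed */
    return dst;
}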
static int lowres
Definition: ffplay.c:335
const uint8_t * raw_scan_buffer
Definition: mjpegdec.h:145
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
AVCodecContext * avctx
Definition: mjpegdec.h:48
void * priv_data
Definition: avcodec.h:1592
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2664
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1344
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
int got_picture
we found a SOF and picture is valid, too.
Definition: mjpegdec.h:101
int len
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3759
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
Definition: mjpegdec.h:105
AVFrame * picture
Definition: mjpegdec.h:99
void * hwaccel_picture_private
Definition: mjpegdec.h:153
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
The official guide to swscale for confused developers.
Definition: swscale.txt:2
Definition: mjpeg.h:50
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
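A small sketch of the GetByteContext pattern behind bytestream2_seek(); the offset read here is invented for illustration.

#include <stdio.h>
#include "libavutil/error.h"
#include "bytestream.h"

/* Jump to an absolute offset inside a blob and read one byte there. */
static int read_byte_at(const uint8_t *buf, int buf_size, int offset)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, buf_size);
    bytestream2_seek(&gb, offset, SEEK_SET);     /* position is clamped to the buffer */
    if (bytestream2_get_bytes_left(&gb) < 1)
        return AVERROR_INVALIDDATA;
    return bytestream2_get_byte(&gb);
}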
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
Definition: pixfmt.h:258
int last_dc[MAX_COMPONENTS]
Definition: mjpegdec.h:98
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
#define REFINE_BIT(j)
Definition: mjpegdec.c:919
uint8_t upscale_h[4]
Definition: mjpegdec.h:68
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2782
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2256
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the beginning of the IFD in *ifd_offset.
Definition: tiff_common.c:261
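A hedged sketch of probing a TIFF/EXIF header with ff_tdecode_header() before walking the IFD; the buffer is assumed to start at the TIFF header (as in an APP1/EXIF payload), and the helper is illustrative.

#include <stdio.h>
#include "bytestream.h"
#include "tiff_common.h"

/* Returns the IFD offset on success, a negative error code otherwise. */
static int probe_tiff_header(const uint8_t *buf, int size, int *le)
{
    GetByteContext gb;
    int ifd_offset, ret;

    bytestream2_init(&gb, buf, size);
    ret = ff_tdecode_header(&gb, le, &ifd_offset);
    if (ret < 0)
        return ret;                               /* not a valid TIFF header */
    bytestream2_seek(&gb, ifd_offset, SEEK_SET);  /* a real caller would now parse the IFD from gb */
    return ifd_offset;
}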
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2222
#define av_always_inline
Definition: attributes.h:39
static const uint8_t start_code[]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:544
Definition: mjpeg.h:82
#define VD
Definition: cuviddec.c:1118
#define FFSWAP(type, a, b)
Definition: common.h:99
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2132
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:81
MJPEG decoder.
#define MKTAG(a, b, c, d)
Definition: common.h:366
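A tiny sketch of the fourcc comparison MKTAG() enables; the specific tag checked here ('AVI1') is just an example of an MJPEG-related fourcc.

#include "libavutil/common.h"
#include "avcodec.h"

/* Hypothetical check: does the container-provided codec tag spell "AVI1"? */
static int is_avi1_tag(const AVCodecContext *avctx)
{
    return avctx->codec_tag == MKTAG('A', 'V', 'I', '1');
}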
AVCodec ff_thp_decoder
Definition: mjpeg.h:61
enum AVCodecID id
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1454
uint16_t quant_matrixes[4][64]
Definition: mjpegdec.h:56
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
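A minimal sketch of releasing Huffman tables built with ff_init_vlc_sparse()/build_vlc() by calling ff_free_vlc() on each entry; the 3x4 loop bounds are an assumption mirroring the vlcs[][] array shape in MJpegDecodeContext.

#include "vlc.h"

/* Free every VLC table in a 2-D array (3 classes x 4 indexes assumed). */
static void free_all_vlcs(VLC vlcs[3][4])
{
    int i, j;

    for (i = 0; i < 3; i++)
        for (j = 0; j < 4; j++)
            ff_free_vlc(&vlcs[i][j]);   /* safe on zero-initialized entries too */
}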
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
#define FFMAX3(a, b, c)
Definition: common.h:95
GLuint buffer
Definition: opengl_enc.c:101
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
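A short sketch of the rounding-up shift AV_CEIL_RSHIFT() performs, as typically used to derive chroma plane widths from a subsampling shift; the shift value is illustrative.

#include "libavutil/common.h"

/* Width of a chroma plane for 4:2:0 content (horizontal shift of 1): rounds up. */
static int chroma_width(int luma_width)
{
    return AV_CEIL_RSHIFT(luma_width, 1);   /* e.g. 1919 -> 960, not 959 */
}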
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
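A hedged sketch of a zeroed array allocation with av_mallocz_array() and its release with av_freep(); the element type is arbitrary.

#include "libavutil/mem.h"

/* Allocate a zero-initialized array of n 16-bit coefficients (NULL on failure). */
static int16_t *alloc_coeffs(size_t n)
{
    return av_mallocz_array(n, sizeof(int16_t));
}

/* Later: av_freep(&coeffs); frees the array and resets the pointer to NULL. */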
Definition: mjpeg.h:49
bitstream writer API