FFmpeg
mss2.c
Go to the documentation of this file.
1 /*
2  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "error_resilience.h"
28 #include "internal.h"
29 #include "mpeg_er.h"
30 #include "msmpeg4.h"
31 #include "qpeldsp.h"
32 #include "vc1.h"
33 #include "wmv2data.h"
34 #include "mss12.h"
35 #include "mss2dsp.h"
36 
37 typedef struct MSS2Context {
45 } MSS2Context;
46 
48 {
49  while ((c->high >> 15) - (c->low >> 15) < 2) {
50  if ((c->low ^ c->high) & 0x10000) {
51  c->high ^= 0x8000;
52  c->value ^= 0x8000;
53  c->low ^= 0x8000;
54  }
55  c->high = (uint16_t)c->high << 8 | 0xFF;
56  c->value = (uint16_t)c->value << 8 | bytestream2_get_byte(c->gbc.gB);
57  c->low = (uint16_t)c->low << 8;
58  }
59 }
60 
/* Instantiates arith2_get_bit() from the shared template in mss12.h. */
ARITH_GET_BIT(arith2)
62 
63 /* L. Stuiver and A. Moffat: "Piecewise Integer Mapping for Arithmetic Coding."
64  * In Proc. 8th Data Compression Conference (DCC '98), pp. 3-12, Mar. 1998 */
65 
/**
 * Map a point of the coder interval onto the scaled symbol range
 * (Stuiver/Moffat piecewise integer mapping): values up to the split
 * point map one-to-one, values beyond it are compressed two-to-one.
 */
static int arith2_get_scaled_value(int value, int n, int range)
{
    int split = 2 * n - range;

    return value > split ? split + ((value - split) >> 1) : value;
}
75 
76 static void arith2_rescale_interval(ArithCoder *c, int range,
77  int low, int high, int n)
78 {
79  int split = (n << 1) - range;
80 
81  if (high > split)
82  c->high = split + (high - split << 1);
83  else
84  c->high = high;
85 
86  c->high += c->low - 1;
87 
88  if (low > split)
89  c->low += split + (low - split << 1);
90  else
91  c->low += low;
92 }
93 
/**
 * Decode an integer in [0, n) assumed uniformly distributed, then
 * narrow and renormalise the coder interval.
 */
static int arith2_get_number(ArithCoder *c, int n)
{
    int range = c->high - c->low + 1;
    int scale = av_log2(range) - av_log2(n);
    int val;

    /* ensure the scaled alphabet n << scale still fits inside range */
    if (n << scale > range)
        scale--;

    n <<= scale;

    val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;

    /* narrow the interval to the subrange of the decoded value */
    arith2_rescale_interval(c, range, val << scale, (val + 1) << scale, n);

    arith2_normalise(c);

    return val;
}
113 
/**
 * Decode a symbol index from a cumulative-frequency table.
 * probs[0] is the total; probs[] is strictly decreasing, so the loop finds
 * the first i with probs[i] <= val. Note: the caller (the
 * ARITH_GET_MODEL_SYM template) is responsible for renormalising.
 */
static int arith2_get_prob(ArithCoder *c, int16_t *probs)
{
    int range = c->high - c->low + 1, n = *probs;
    int scale = av_log2(range) - av_log2(n);
    int i = 0, val;

    /* keep the scaled total within the interval size */
    if (n << scale > range)
        scale--;

    n <<= scale;

    val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
    while (probs[++i] > val) ;

    /* narrow the interval to [probs[i], probs[i-1]) scaled back up */
    arith2_rescale_interval(c, range,
                            probs[i] << scale, probs[i - 1] << scale, n);

    return i;
}
133 
/* Instantiates arith2_get_model_sym() (uses arith2_get_prob + normalise). */
ARITH_GET_MODEL_SYM(arith2)
135 
137 {
138  int diff = (c->high >> 16) - (c->low >> 16);
139  int bp = bytestream2_tell(c->gbc.gB) - 3 << 3;
140  int bits = 1;
141 
142  while (!(diff & 0x80)) {
143  bits++;
144  diff <<= 1;
145  }
146 
147  return (bits + bp + 7 >> 3) + ((c->low >> 16) + 1 == c->high >> 16);
148 }
149 
151 {
152  c->low = 0;
153  c->high = 0xFFFFFF;
154  c->value = bytestream2_get_be24(gB);
155  c->overread = 0;
156  c->gbc.gB = gB;
157  c->get_model_sym = arith2_get_model_sym;
159 }
160 
161 static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
162 {
163  int i, ncol;
164  uint32_t *pal = ctx->pal + 256 - ctx->free_colours;
165 
166  if (!ctx->free_colours)
167  return 0;
168 
169  ncol = *buf++;
170  if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
171  return AVERROR_INVALIDDATA;
172  for (i = 0; i < ncol; i++)
173  *pal++ = AV_RB24(buf + 3 * i);
174 
175  return 1 + ncol * 3;
176 }
177 
/**
 * Decode an RGB555 RLE-coded plane.
 * Byte codes: b < 128 starts a literal 15-bit pixel (b is the high byte);
 * b == 128/129 select special symbols (-1: copy from the row above,
 * -2: skip); b > 129 introduces a big-endian run length of (b - 130) + 1
 * bytes. On inter frames a clip rectangle is read first.
 */
static int decode_555(AVCodecContext *avctx, GetByteContext *gB, uint16_t *dst, ptrdiff_t stride,
                      int keyframe, int w, int h)
{
    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, endx, endy, t;

/* read two 12-bit values packed into 3 bytes */
#define READ_PAIR(a, b) \
    a  = bytestream2_get_byte(gB) << 4; \
    t  = bytestream2_get_byte(gB); \
    a |= t >> 4; \
    b  = (t & 0xF) << 8; \
    b |= bytestream2_get_byte(gB); \

        READ_PAIR(x, endx)
        READ_PAIR(y, endy)

        if (endx >= w || endy >= h || x > endx || y > endy)
            return AVERROR_INVALIDDATA;
        dst += x + stride * y;
        w    = endx - x + 1;
        h    = endy - y + 1;
        /* rows above the clip rect are valid history for the copy-up symbol */
        if (y)
            prev_avail = 1;
    }

    do {
        uint16_t *p = dst;
        do {
            if (repeat-- < 1) {
                int b = bytestream2_get_byte(gB);
                if (b < 128)
                    last_symbol = b << 8 | bytestream2_get_byte(gB);
                else if (b > 129) {
                    /* multi-byte big-endian run length, +1 per byte */
                    repeat = 0;
                    while (b-- > 130) {
                        if (repeat >= (INT_MAX >> 8) - 1) {
                            av_log(avctx, AV_LOG_ERROR, "repeat overflow\n");
                            return AVERROR_INVALIDDATA;
                        }
                        repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
                    }
                    /* -2 = transparent skip: just advance within the row */
                    if (last_symbol == -2) {
                        int skip = FFMIN((unsigned)repeat, dst + w - p);
                        repeat -= skip;
                        p      += skip;
                    }
                } else
                    last_symbol = 127 - b; /* 128 -> -1 (copy up), 129 -> -2 (skip) */
            }
            if (last_symbol >= 0)
                *p = last_symbol;
            else if (last_symbol == -1 && prev_avail)
                *p = *(p - stride);
        } while (++p < dst + w);
        dst += stride;
        prev_avail = 1;
    } while (--h);

    return 0;
}
240 
/**
 * Decode a paletted RLE slice using a canonical Huffman code whose lengths
 * are partly transmitted and partly implied (the remaining alphabet is
 * filled in lexicographically at the minimum feasible length).
 * Writes both the palette-index plane (pal_dst) and the expanded RGB24
 * plane (rgb_dst). Keyframes are split into two slices at row kf_slipt;
 * inter frames carry an explicit clip rectangle instead.
 */
static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride,
                      uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal,
                      int keyframe, int kf_slipt, int slice, int w, int h)
{
    uint8_t  bits[270] = { 0 };
    uint32_t codes[270];
    VLC vlc;

    int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
    int remaining_codes, surplus_codes, i;

    const int alphabet_size = 270 - keyframe;

    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, clipw, cliph;

        x     = get_bits(gb, 12);
        y     = get_bits(gb, 12);
        clipw = get_bits(gb, 12) + 1;
        cliph = get_bits(gb, 12) + 1;

        if (x + clipw > w || y + cliph > h)
            return AVERROR_INVALIDDATA;
        pal_dst += pal_stride * y + x;
        rgb_dst += rgb_stride * y + x * 3;
        w = clipw;
        h = cliph;
        if (y)
            prev_avail = 1;
    } else {
        /* keyframe: decode either the top (slice 0) or bottom (slice 1) part */
        if (slice > 0) {
            pal_dst += pal_stride * kf_slipt;
            rgb_dst += rgb_stride * kf_slipt;
            prev_avail = 1;
            h         -= kf_slipt;
        } else
            h = kf_slipt;
    }

    /* read explicit codes */
    do {
        while (current_codes--) {
            int symbol = get_bits(gb, 8);
            /* symbols 190..203 are escape pairs; >= 204 are shifted up */
            if (symbol >= 204 - keyframe)
                symbol += 14 - keyframe;
            else if (symbol > 189)
                symbol = get_bits1(gb) + (symbol << 1) - 190;
            if (bits[symbol])
                return AVERROR_INVALIDDATA;
            bits[symbol]  = current_length;
            codes[symbol] = next_code++;
            read_codes++;
        }
        current_length++;
        next_code     <<= 1;
        remaining_codes = (1 << current_length) - next_code;
        current_codes   = get_bits(gb, av_ceil_log2(remaining_codes + 1));
        if (current_length > 22 || current_codes > remaining_codes)
            return AVERROR_INVALIDDATA;
    } while (current_codes != remaining_codes);

    remaining_codes = alphabet_size - read_codes;

    /* determine the minimum length to fit the rest of the alphabet */
    while ((surplus_codes = (2 << current_length) -
                            (next_code << 1) - remaining_codes) < 0) {
        current_length++;
        next_code <<= 1;
    }

    /* add the rest of the symbols lexicographically */
    for (i = 0; i < alphabet_size; i++)
        if (!bits[i]) {
            if (surplus_codes-- == 0) {
                current_length++;
                next_code <<= 1;
            }
            bits[i]  = current_length;
            codes[i] = next_code++;
        }

    /* a canonical code must exactly fill the code space */
    if (next_code != 1 << current_length)
        return AVERROR_INVALIDDATA;

    if ((i = init_vlc(&vlc, 9, alphabet_size, bits, 1, 1, codes, 4, 4, 0)) < 0)
        return i;

    /* frame decode */
    do {
        uint8_t *pp = pal_dst;
        uint8_t *rp = rgb_dst;
        do {
            if (repeat-- < 1) {
                int b = get_vlc2(gb, vlc.table, 9, 3);
                if (b < 256)
                    last_symbol = b;
                else if (b < 268) {
                    /* run-length symbol: length class 0..10, 11 = extended */
                    b -= 256;
                    if (b == 11)
                        b = get_bits(gb, 4) + 10;

                    if (!b)
                        repeat = 0;
                    else
                        repeat = get_bits(gb, b);

                    repeat += (1 << b) - 1;

                    /* -2 = transparent skip symbol */
                    if (last_symbol == -2) {
                        int skip = FFMIN(repeat, pal_dst + w - pp);
                        repeat -= skip;
                        pp     += skip;
                        rp     += skip * 3;
                    }
                } else
                    last_symbol = 267 - b; /* 268 -> -1 (copy up), 269 -> -2 (skip) */
            }
            if (last_symbol >= 0) {
                *pp = last_symbol;
                AV_WB24(rp, pal[last_symbol]);
            } else if (last_symbol == -1 && prev_avail) {
                *pp = *(pp - pal_stride);
                memcpy(rp, rp - rgb_stride, 3);
            }
            rp += 3;
        } while (++pp < pal_dst + w);
        pal_dst += pal_stride;
        rgb_dst += rgb_stride;
        prev_avail = 1;
    } while (--h);

    ff_free_vlc(&vlc);
    return 0;
}
377 
378 static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
379  int x, int y, int w, int h, int wmv9_mask)
380 {
381  MSS2Context *ctx = avctx->priv_data;
382  MSS12Context *c = &ctx->c;
383  VC1Context *v = avctx->priv_data;
384  MpegEncContext *s = &v->s;
385  AVFrame *f;
386  int ret;
387 
388  ff_mpeg_flush(avctx);
389 
390  if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
391  return ret;
392 
394 
395  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
396  av_log(v->s.avctx, AV_LOG_ERROR, "header error\n");
397  return AVERROR_INVALIDDATA;
398  }
399 
400  if (s->pict_type != AV_PICTURE_TYPE_I) {
401  av_log(v->s.avctx, AV_LOG_ERROR, "expected I-frame\n");
402  return AVERROR_INVALIDDATA;
403  }
404 
405  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
406 
407  if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
408  av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
409  avctx->pix_fmt = AV_PIX_FMT_RGB24;
410  return ret;
411  }
412 
414 
415  v->bits = buf_size * 8;
416 
417  v->end_mb_x = (w + 15) >> 4;
418  s->end_mb_y = (h + 15) >> 4;
419  if (v->respic & 1)
420  v->end_mb_x = v->end_mb_x + 1 >> 1;
421  if (v->respic & 2)
422  s->end_mb_y = s->end_mb_y + 1 >> 1;
423 
425 
426  if (v->end_mb_x == s->mb_width && s->end_mb_y == s->mb_height) {
427  ff_er_frame_end(&s->er);
428  } else {
430  "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
431  v->end_mb_x, s->end_mb_y, s->mb_width, s->mb_height);
432  }
433 
434  ff_mpv_frame_end(s);
435 
436  f = s->current_picture.f;
437 
438  if (v->respic == 3) {
439  ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w, h);
440  ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w+1 >> 1, h+1 >> 1);
441  ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w+1 >> 1, h+1 >> 1);
442  } else if (v->respic)
444  "Asymmetric WMV9 rectangle subsampling");
445 
446  av_assert0(f->linesize[1] == f->linesize[2]);
447 
448  if (wmv9_mask != -1)
449  ctx->dsp.mss2_blit_wmv9_masked(c->rgb_pic + y * c->rgb_stride + x * 3,
450  c->rgb_stride, wmv9_mask,
451  c->pal_pic + y * c->pal_stride + x,
452  c->pal_stride,
453  f->data[0], f->linesize[0],
454  f->data[1], f->data[2], f->linesize[1],
455  w, h);
456  else
457  ctx->dsp.mss2_blit_wmv9(c->rgb_pic + y * c->rgb_stride + x * 3,
458  c->rgb_stride,
459  f->data[0], f->linesize[0],
460  f->data[1], f->data[2], f->linesize[1],
461  w, h);
462 
463  avctx->pix_fmt = AV_PIX_FMT_RGB24;
464 
465  return 0;
466 }
467 
/* A sub-rectangle of the frame; coded != 0 means WMV9 data follows for it. */
struct Rectangle {
    int coded, x, y, w, h;
};
471 
/* Upper bound on WMV9 rectangles per frame. */
#define MAX_WMV9_RECTANGLES 20
/* Extra bytes the arithmetic decoder may read past the end of its input. */
#define ARITH2_PADDING 2
474 
475 static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
476  AVPacket *avpkt)
477 {
478  const uint8_t *buf = avpkt->data;
479  int buf_size = avpkt->size;
480  MSS2Context *ctx = avctx->priv_data;
481  MSS12Context *c = &ctx->c;
482  AVFrame *frame = data;
483  GetBitContext gb;
484  GetByteContext gB;
485  ArithCoder acoder;
486 
487  int keyframe, has_wmv9, has_mv, is_rle, is_555, ret;
488 
489  struct Rectangle wmv9rects[MAX_WMV9_RECTANGLES], *r;
490  int used_rects = 0, i, implicit_rect = 0, av_uninit(wmv9_mask);
491 
492  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
493  return ret;
494 
495  if (keyframe = get_bits1(&gb))
496  skip_bits(&gb, 7);
497  has_wmv9 = get_bits1(&gb);
498  has_mv = keyframe ? 0 : get_bits1(&gb);
499  is_rle = get_bits1(&gb);
500  is_555 = is_rle && get_bits1(&gb);
501  if (c->slice_split > 0)
502  ctx->split_position = c->slice_split;
503  else if (c->slice_split < 0) {
504  if (get_bits1(&gb)) {
505  if (get_bits1(&gb)) {
506  if (get_bits1(&gb))
507  ctx->split_position = get_bits(&gb, 16);
508  else
509  ctx->split_position = get_bits(&gb, 12);
510  } else
511  ctx->split_position = get_bits(&gb, 8) << 4;
512  } else {
513  if (keyframe)
514  ctx->split_position = avctx->height / 2;
515  }
516  } else
517  ctx->split_position = avctx->height;
518 
519  if (c->slice_split && (ctx->split_position < 1 - is_555 ||
520  ctx->split_position > avctx->height - 1))
521  return AVERROR_INVALIDDATA;
522 
523  align_get_bits(&gb);
524  buf += get_bits_count(&gb) >> 3;
525  buf_size -= get_bits_count(&gb) >> 3;
526 
527  if (buf_size < 1)
528  return AVERROR_INVALIDDATA;
529 
530  if (is_555 && (has_wmv9 || has_mv || c->slice_split && ctx->split_position))
531  return AVERROR_INVALIDDATA;
532 
533  avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
534  if (ctx->last_pic->format != avctx->pix_fmt)
535  av_frame_unref(ctx->last_pic);
536 
537  if (has_wmv9) {
538  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
539  arith2_init(&acoder, &gB);
540 
541  implicit_rect = !arith2_get_bit(&acoder);
542 
543  while (arith2_get_bit(&acoder)) {
544  if (used_rects == MAX_WMV9_RECTANGLES)
545  return AVERROR_INVALIDDATA;
546  r = &wmv9rects[used_rects];
547  if (!used_rects)
548  r->x = arith2_get_number(&acoder, avctx->width);
549  else
550  r->x = arith2_get_number(&acoder, avctx->width -
551  wmv9rects[used_rects - 1].x) +
552  wmv9rects[used_rects - 1].x;
553  r->y = arith2_get_number(&acoder, avctx->height);
554  r->w = arith2_get_number(&acoder, avctx->width - r->x) + 1;
555  r->h = arith2_get_number(&acoder, avctx->height - r->y) + 1;
556  used_rects++;
557  }
558 
559  if (implicit_rect && used_rects) {
560  av_log(avctx, AV_LOG_ERROR, "implicit_rect && used_rects > 0\n");
561  return AVERROR_INVALIDDATA;
562  }
563 
564  if (implicit_rect) {
565  wmv9rects[0].x = 0;
566  wmv9rects[0].y = 0;
567  wmv9rects[0].w = avctx->width;
568  wmv9rects[0].h = avctx->height;
569 
570  used_rects = 1;
571  }
572  for (i = 0; i < used_rects; i++) {
573  if (!implicit_rect && arith2_get_bit(&acoder)) {
574  av_log(avctx, AV_LOG_ERROR, "Unexpected grandchildren\n");
575  return AVERROR_INVALIDDATA;
576  }
577  if (!i) {
578  wmv9_mask = arith2_get_bit(&acoder) - 1;
579  if (!wmv9_mask)
580  wmv9_mask = arith2_get_number(&acoder, 256);
581  }
582  wmv9rects[i].coded = arith2_get_number(&acoder, 2);
583  }
584 
585  buf += arith2_get_consumed_bytes(&acoder);
586  buf_size -= arith2_get_consumed_bytes(&acoder);
587  if (buf_size < 1)
588  return AVERROR_INVALIDDATA;
589  }
590 
591  c->mvX = c->mvY = 0;
592  if (keyframe && !is_555) {
593  if ((i = decode_pal_v2(c, buf, buf_size)) < 0)
594  return AVERROR_INVALIDDATA;
595  buf += i;
596  buf_size -= i;
597  } else if (has_mv) {
598  buf += 4;
599  buf_size -= 4;
600  if (buf_size < 1)
601  return AVERROR_INVALIDDATA;
602  c->mvX = AV_RB16(buf - 4) - avctx->width;
603  c->mvY = AV_RB16(buf - 2) - avctx->height;
604  }
605 
606  if (c->mvX < 0 || c->mvY < 0) {
607  FFSWAP(uint8_t *, c->pal_pic, c->last_pal_pic);
608 
609  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
610  return ret;
611 
612  if (ctx->last_pic->data[0]) {
613  av_assert0(frame->linesize[0] == ctx->last_pic->linesize[0]);
614  c->last_rgb_pic = ctx->last_pic->data[0] +
615  ctx->last_pic->linesize[0] * (avctx->height - 1);
616  } else {
617  av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
618  return AVERROR_INVALIDDATA;
619  }
620  } else {
621  if ((ret = ff_reget_buffer(avctx, ctx->last_pic, 0)) < 0)
622  return ret;
623  if ((ret = av_frame_ref(frame, ctx->last_pic)) < 0)
624  return ret;
625 
626  c->last_rgb_pic = NULL;
627  }
628  c->rgb_pic = frame->data[0] +
629  frame->linesize[0] * (avctx->height - 1);
630  c->rgb_stride = -frame->linesize[0];
631 
632  frame->key_frame = keyframe;
633  frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
634 
635  if (is_555) {
636  bytestream2_init(&gB, buf, buf_size);
637 
638  if (decode_555(avctx, &gB, (uint16_t *)c->rgb_pic, c->rgb_stride >> 1,
639  keyframe, avctx->width, avctx->height))
640  return AVERROR_INVALIDDATA;
641 
642  buf_size -= bytestream2_tell(&gB);
643  } else {
644  if (keyframe) {
645  c->corrupted = 0;
647  if (c->slice_split)
649  }
650  if (is_rle) {
651  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
652  return ret;
653  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
654  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
655  ctx->split_position, 0,
656  avctx->width, avctx->height))
657  return ret;
658  align_get_bits(&gb);
659 
660  if (c->slice_split)
661  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
662  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
663  ctx->split_position, 1,
664  avctx->width, avctx->height))
665  return ret;
666 
667  align_get_bits(&gb);
668  buf += get_bits_count(&gb) >> 3;
669  buf_size -= get_bits_count(&gb) >> 3;
670  } else if (!implicit_rect || wmv9_mask != -1) {
671  if (c->corrupted)
672  return AVERROR_INVALIDDATA;
673  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
674  arith2_init(&acoder, &gB);
675  c->keyframe = keyframe;
676  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[0], &acoder, 0, 0,
677  avctx->width,
678  ctx->split_position))
679  return AVERROR_INVALIDDATA;
680 
681  buf += arith2_get_consumed_bytes(&acoder);
682  buf_size -= arith2_get_consumed_bytes(&acoder);
683  if (c->slice_split) {
684  if (buf_size < 1)
685  return AVERROR_INVALIDDATA;
686  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
687  arith2_init(&acoder, &gB);
688  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[1], &acoder, 0,
689  ctx->split_position,
690  avctx->width,
691  avctx->height - ctx->split_position))
692  return AVERROR_INVALIDDATA;
693 
694  buf += arith2_get_consumed_bytes(&acoder);
695  buf_size -= arith2_get_consumed_bytes(&acoder);
696  }
697  } else
698  memset(c->pal_pic, 0, c->pal_stride * avctx->height);
699  }
700 
701  if (has_wmv9) {
702  for (i = 0; i < used_rects; i++) {
703  int x = wmv9rects[i].x;
704  int y = wmv9rects[i].y;
705  int w = wmv9rects[i].w;
706  int h = wmv9rects[i].h;
707  if (wmv9rects[i].coded) {
708  int WMV9codedFrameSize;
709  if (buf_size < 4 || !(WMV9codedFrameSize = AV_RL24(buf)))
710  return AVERROR_INVALIDDATA;
711  if (ret = decode_wmv9(avctx, buf + 3, buf_size - 3,
712  x, y, w, h, wmv9_mask))
713  return ret;
714  buf += WMV9codedFrameSize + 3;
715  buf_size -= WMV9codedFrameSize + 3;
716  } else {
717  uint8_t *dst = c->rgb_pic + y * c->rgb_stride + x * 3;
718  if (wmv9_mask != -1) {
719  ctx->dsp.mss2_gray_fill_masked(dst, c->rgb_stride,
720  wmv9_mask,
721  c->pal_pic + y * c->pal_stride + x,
722  c->pal_stride,
723  w, h);
724  } else {
725  do {
726  memset(dst, 0x80, w * 3);
727  dst += c->rgb_stride;
728  } while (--h);
729  }
730  }
731  }
732  }
733 
734  if (buf_size)
735  av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");
736 
737  if (c->mvX < 0 || c->mvY < 0) {
738  av_frame_unref(ctx->last_pic);
739  ret = av_frame_ref(ctx->last_pic, frame);
740  if (ret < 0)
741  return ret;
742  }
743 
744  *got_frame = 1;
745 
746  return avpkt->size;
747 }
748 
749 static av_cold int wmv9_init(AVCodecContext *avctx)
750 {
751  VC1Context *v = avctx->priv_data;
752  int ret;
753 
754  v->s.avctx = avctx;
755 
756  if ((ret = ff_vc1_init_common(v)) < 0)
757  return ret;
758  ff_vc1dsp_init(&v->vc1dsp);
759 
760  v->profile = PROFILE_MAIN;
761 
764  v->res_y411 = 0;
765  v->res_sprite = 0;
766 
767  v->frmrtq_postproc = 7;
768  v->bitrtq_postproc = 31;
769 
770  v->res_x8 = 0;
771  v->multires = 0;
772  v->res_fasttx = 1;
773 
774  v->fastuvmc = 0;
775 
776  v->extended_mv = 0;
777 
778  v->dquant = 1;
779  v->vstransform = 1;
780 
781  v->res_transtab = 0;
782 
783  v->overlap = 0;
784 
785  v->resync_marker = 0;
786  v->rangered = 0;
787 
788  v->s.max_b_frames = avctx->max_b_frames = 0;
789  v->quantizer_mode = 0;
790 
791  v->finterpflag = 0;
792 
793  v->res_rtm_flag = 1;
794 
796 
797  if ((ret = ff_msmpeg4_decode_init(avctx)) < 0 ||
798  (ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
799  return ret;
800 
801  /* error concealment */
804 
805  return 0;
806 }
807 
809 {
810  MSS2Context *const ctx = avctx->priv_data;
811 
812  av_frame_free(&ctx->last_pic);
813 
814  ff_mss12_decode_end(&ctx->c);
815  av_freep(&ctx->c.pal_pic);
816  av_freep(&ctx->c.last_pal_pic);
817  ff_vc1_decode_end(avctx);
818 
819  return 0;
820 }
821 
823 {
824  MSS2Context * const ctx = avctx->priv_data;
825  MSS12Context *c = &ctx->c;
826  int ret;
827  c->avctx = avctx;
828  if (ret = ff_mss12_decode_init(c, 1, &ctx->sc[0], &ctx->sc[1]))
829  return ret;
830  ctx->last_pic = av_frame_alloc();
831  c->pal_stride = c->mask_stride;
832  c->pal_pic = av_mallocz(c->pal_stride * avctx->height);
833  c->last_pal_pic = av_mallocz(c->pal_stride * avctx->height);
834  if (!c->pal_pic || !c->last_pal_pic || !ctx->last_pic) {
835  mss2_decode_end(avctx);
836  return AVERROR(ENOMEM);
837  }
838  if (ret = wmv9_init(avctx)) {
839  mss2_decode_end(avctx);
840  return ret;
841  }
842  ff_mss2dsp_init(&ctx->dsp);
843  ff_qpeldsp_init(&ctx->qdsp);
844 
845  avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
847 
848 
849  return 0;
850 }
851 
853  .name = "mss2",
854  .long_name = NULL_IF_CONFIG_SMALL("MS Windows Media Video V9 Screen"),
855  .type = AVMEDIA_TYPE_VIDEO,
856  .id = AV_CODEC_ID_MSS2,
857  .priv_data_size = sizeof(MSS2Context),
859  .close = mss2_decode_end,
861  .capabilities = AV_CODEC_CAP_DR1,
862 };
static av_cold int mss2_decode_init(AVCodecContext *avctx)
Definition: mss2.c:822
#define NULL
Definition: coverity.c:32
MSS12Context c
Definition: mss2.c:41
const char const char void * val
Definition: avisynth_c.h:863
#define ARITH2_PADDING
Definition: mss2.c:473
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int arith2_get_scaled_value(int value, int n, int range)
Definition: mss2.c:66
QpelDSPContext qdsp
Definition: mss2.c:43
The VC1 Context.
Definition: vc1.h:173
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int overread
Definition: dstdec.c:59
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
int(* get_number)(struct ArithCoder *c, int n)
Definition: mss12.h:57
const uint8_t ff_wmv2_scantableB[64]
Definition: wmv2data.c:30
int high
Definition: mss12.h:49
av_cold int ff_mss12_decode_init(MSS12Context *c, int version, SliceContext *sc1, SliceContext *sc2)
Definition: mss12.c:577
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
Definition: msmpeg4dec.c:301
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int(* get_model_sym)(struct ArithCoder *c, Model *m)
Definition: mss12.h:56
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:154
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1828
#define avpriv_request_sample(...)
int extended_mv
Ext MV in P/B (not in Simple)
Definition: vc1.h:223
int value
Definition: mss12.h:49
void ff_er_frame_end(ERContext *s)
int corrupted
Definition: mss12.h:91
static void arith2_rescale_interval(ArithCoder *c, int range, int low, int high, int n)
Definition: mss2.c:76
int size
Definition: avcodec.h:1481
int av_log2(unsigned v)
Definition: intmath.c:26
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
int w
Definition: mss2.c:469
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
discard all
Definition: avcodec.h:814
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
Definition: vc1.h:222
int end_mb_x
Horizontal macroblock limit (used only by mss2)
Definition: vc1.h:399
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:87
uint8_t * rgb_pic
Definition: mss12.h:85
QpelDSPContext qdsp
Definition: mpegvideo.h:235
AVCodec.
Definition: avcodec.h:3492
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int frmrtq_postproc
3 bits,
Definition: vc1.h:219
int ff_mss12_decode_rect(SliceContext *sc, ArithCoder *acoder, int x, int y, int width, int height)
Definition: mss12.c:539
int bits
Definition: vc1.h:179
int slice_split
Definition: mss12.h:92
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int res_transtab
reserved, always 0
Definition: vc1.h:188
uint32_t pal[256]
Definition: mss12.h:79
uint8_t
uint8_t * last_pal_pic
Definition: mss12.h:81
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int keyframe
Definition: mss12.h:89
union ArithCoder::@121 gbc
av_cold int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
Definition: vc1.c:1584
#define f(width, name)
Definition: cbs_vp9.c:255
qpel_mc_func(* qpel_put)[16]
Definition: motion_est.h:91
GLsizei GLboolean const GLfloat * value
Definition: opengl_enc.c:108
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
ptrdiff_t rgb_stride
Definition: mss12.h:87
int coded
Definition: mss2.c:469
int split_position
Definition: mss2.c:39
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
quarterpel DSP functions
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
Definition: vc1.h:218
uint8_t * data
Definition: avcodec.h:1480
GetByteContext * gB
Definition: mss12.h:54
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:129
int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
Definition: vc1dec.c:592
#define av_log(a,...)
static int arith2_get_number(ArithCoder *c, int n)
Definition: mss2.c:94
int h
Definition: mss2.c:469
static int arith2_get_prob(ArithCoder *c, int16_t *probs)
Definition: mss2.c:114
static av_cold int wmv9_init(AVCodecContext *avctx)
Definition: mss2.c:749
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold void ff_mss2dsp_init(MSS2DSPContext *dsp)
Definition: mss2dsp.c:150
int res_y411
reserved, old interlaced mode
Definition: vc1.h:184
int overlap
overlapped transforms in use
Definition: vc1.h:226
int res_x8
reserved
Definition: vc1.h:185
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: vlc.h:38
static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int x, int y, int w, int h, int wmv9_mask)
Definition: mss2.c:378
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
ERContext er
Definition: mpegvideo.h:566
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:2015
const char * r
Definition: vf_curves.c:114
void(* upsample_plane)(uint8_t *plane, ptrdiff_t plane_stride, int w, int h)
Definition: mss2dsp.h:46
#define ARITH_GET_MODEL_SYM(prefix)
Definition: mss12.h:120
int mvY
Definition: mss12.h:90
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_vc1_decode_blocks(VC1Context *v)
Definition: vc1_block.c:3005
const char * name
Name of the codec implementation.
Definition: avcodec.h:3499
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
uint8_t bits
Definition: vp3data.h:202
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
GetBitContext gb
Definition: mpegvideo.h:448
int resync_marker
could this stream contain resync markers
Definition: vc1.h:402
const uint8_t ff_wmv2_scantableA[64]
Definition: wmv2data.c:23
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
Definition: vc1.h:241
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:191
Definition: vlc.h:26
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
static void arith2_init(ArithCoder *c, GetByteContext *gB)
Definition: mss2.c:150
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2307
AVCodecContext * avctx
Definition: mss12.h:78
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
Definition: vc1.h:242
#define b
Definition: input.c:41
SliceContext sc[2]
Definition: mss2.c:44
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define FFMIN(a, b)
Definition: common.h:96
int width
picture width / height.
Definition: avcodec.h:1741
uint8_t w
Definition: llviddspenc.c:38
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
AVFormatContext * ctx
Definition: movenc.c:48
#define AV_WB24(p, d)
Definition: intreadwrite.h:450
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
Definition: vc1.c:626
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
ptrdiff_t pal_stride
Definition: mss12.h:82
#define s(width, name)
Definition: cbs_vp9.c:257
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
static int decode_555(AVCodecContext *avctx, GetByteContext *gB, uint16_t *dst, ptrdiff_t stride, int keyframe, int w, int h)
Definition: mss2.c:178
MotionEstContext me
Definition: mpegvideo.h:282
int n
Definition: avisynth_c.h:760
static int arith2_get_consumed_bytes(ArithCoder *c)
Definition: mss2.c:136
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int rangered
RANGEREDFRM (range reduction) syntax element present at frame level.
Definition: vc1.h:189
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
int finterpflag
INTERPFRM present.
Definition: vc1.h:228
av_cold int ff_mss12_decode_end(MSS12Context *c)
Definition: mss12.c:689
int res_sprite
Simple/Main Profile sequence header.
Definition: vc1.h:183
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:87
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
int multires
frame-level RESPIC syntax element present
Definition: vc1.h:186
main external API structure.
Definition: avcodec.h:1568
static av_cold int mss2_decode_end(AVCodecContext *avctx)
Definition: mss2.c:808
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
void * buf
Definition: avisynth_c.h:766
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
struct AVFrame * f
Definition: mpegpicture.h:46
uint8_t respic
Frame-level flag for resized images.
Definition: vc1.h:274
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1207
int quantizer_mode
2 bits, quantizer mode used for sequence, see QUANT_*
Definition: vc1.h:227
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:115
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
quarterpel DSP context
Definition: qpeldsp.h:72
int vstransform
variable-size [48]x[48] transform type + info
Definition: vc1.h:225
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
AVCodec ff_mss2_decoder
Definition: mss2.c:852
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
qpel_mc_func(* qpel_avg)[16]
Definition: motion_est.h:92
MpegEncContext s
Definition: vc1.h:174
VC1Context v
Definition: mss2.c:38
MpegEncContext.
Definition: mpegvideo.h:81
void ff_vc1_init_transposed_scantables(VC1Context *v)
Definition: vc1dec.c:403
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
int y
Definition: mss2.c:469
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
void(* mss2_gray_fill_masked)(uint8_t *dst, ptrdiff_t dst_stride, int maskcolor, const uint8_t *mask, ptrdiff_t mask_stride, int w, int h)
Definition: mss2dsp.h:43
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
void(* mss2_blit_wmv9_masked)(uint8_t *dst, ptrdiff_t dst_stride, int maskcolor, const uint8_t *mask, ptrdiff_t mask_stride, const uint8_t *srcy, ptrdiff_t srcy_stride, const uint8_t *srcu, const uint8_t *srcv, ptrdiff_t srcuv_stride, int w, int h)
Definition: mss2dsp.h:37
common internal api header.
void ff_mss12_slicecontext_reset(SliceContext *sc)
Definition: mss12.c:435
int res_fasttx
reserved, always 1
Definition: vc1.h:187
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:375
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3029
Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder DSP routines.
int free_colours
Definition: mss12.h:88
ptrdiff_t mask_stride
Definition: mss12.h:84
void * priv_data
Definition: avcodec.h:1595
static av_always_inline int diff(const uint32_t a, const uint32_t b)
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1431
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int low
Definition: mss12.h:49
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
uint8_t * last_rgb_pic
Definition: mss12.h:86
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
int bitrtq_postproc
5 bits, quantized framerate-based postprocessing strength
Definition: vc1.h:220
AVFrame * last_pic
Definition: mss2.c:40
static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
Definition: mss2.c:161
#define av_uninit(x)
Definition: attributes.h:148
#define ARITH_GET_BIT(prefix)
Definition: mss12.h:104
int ff_vc1_decode_init_alloc_tables(VC1Context *v)
Definition: vc1dec.c:324
static void arith2_normalise(ArithCoder *c)
Definition: mss2.c:47
#define av_freep(p)
#define READ_PAIR(a, b)
MSS2DSPContext dsp
Definition: mss2.c:42
static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mss2.c:475
int dquant
How qscale varies with MBs, 2 bits (not in Simple)
Definition: vc1.h:224
int mvX
Definition: mss12.h:90
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
#define FFSWAP(type, a, b)
Definition: common.h:99
#define stride
void(* mss2_blit_wmv9)(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *srcy, ptrdiff_t srcy_stride, const uint8_t *srcu, const uint8_t *srcv, ptrdiff_t srcuv_stride, int w, int h)
Definition: mss2dsp.h:33
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
This structure stores compressed data.
Definition: avcodec.h:1457
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1179
av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
Definition: vc1dsp.c:971
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:984
uint8_t * pal_pic
Definition: mss12.h:80
static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride, uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal, int keyframe, int kf_slipt, int slice, int w, int h)
Definition: mss2.c:241
VC1DSPContext vc1dsp
Definition: vc1.h:177
Predicted.
Definition: avutil.h:275
Common header for Microsoft Screen 1 and 2.
int x
Definition: mss2.c:469