rasc.c
/*
 * RemotelyAnywhere Screen Capture decoder
 *
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

#include <zlib.h>

#define KBND MKTAG('K', 'B', 'N', 'D')
#define FINT MKTAG('F', 'I', 'N', 'T')
#define INIT MKTAG('I', 'N', 'I', 'T')
#define BNDL MKTAG('B', 'N', 'D', 'L')
#define KFRM MKTAG('K', 'F', 'R', 'M')
#define DLTA MKTAG('D', 'L', 'T', 'A')
#define MOUS MKTAG('M', 'O', 'U', 'S')
#define MPOS MKTAG('M', 'P', 'O', 'S')
#define MOVE MKTAG('M', 'O', 'V', 'E')
#define EMPT MKTAG('E', 'M', 'P', 'T')

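/*
 * Chunk tags, as handled by decode_frame() below: KBND and BNDL wrap a
 * bundle of sub-chunks (KBND appears to mark a keyframe bundle), FINT/INIT
 * carry frame dimensions, bit depth and palette, KFRM a zlib-compressed
 * keyframe, DLTA a run-length coded delta, MOVE a list of block move/clear
 * operations, MOUS and MPOS the cursor image and position, and EMPT an
 * empty packet that produces no output frame.
 */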
typedef struct RASCContext {
    AVClass        *class;
    int             skip_cursor;
    GetByteContext  gb;
    uint8_t        *delta;
    int             delta_size;
    uint8_t        *cursor;
    int             cursor_size;
    unsigned        cursor_w;
    unsigned        cursor_h;
    unsigned        cursor_x;
    unsigned        cursor_y;
    int             stride;
    int             bpp;
    z_stream        zstream;
    AVFrame        *frame;
    AVFrame        *frame1;
    AVFrame        *frame2;
} RASCContext;

static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst = frame->data[0];

    if (!dst)
        return;

    for (int y = 0; y < avctx->height; y++) {
        memset(dst, 0, avctx->width * s->bpp);
        dst += frame->linesize[0];
    }
}

static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *srcp = src->data[0];
    uint8_t *dstp = dst->data[0];

    for (int y = 0; y < avctx->height; y++) {
        memcpy(dstp, srcp, s->stride);
        srcp += src->linesize[0];
        dstp += dst->linesize[0];
    }
}

static int init_frames(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int ret;

    av_frame_unref(s->frame1);
    av_frame_unref(s->frame2);
    if ((ret = ff_get_buffer(avctx, s->frame1, 0)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, s->frame2, 0)) < 0)
        return ret;

    clear_plane(avctx, s->frame2);
    clear_plane(avctx, s->frame1);

    return 0;
}

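/*
 * FINT/INIT chunk: frame (re)initialization. From the parsing below, the
 * chunk appears to start with the marker value 0x65, followed by 32-bit LE
 * width and height, a 16-bit LE bit depth (8, 16 or 32) and, for 8-bit
 * streams, a 256-entry 32-bit palette. Without the 0x65 marker both
 * reference frames are simply cleared.
 */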
static int decode_fint(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, fmt;
    int ret;

    if (bytestream2_peek_le32(gb) != 0x65) {
        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        clear_plane(avctx, s->frame2);
        clear_plane(avctx, s->frame1);
        return 0;
    }

    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 30);
    fmt = bytestream2_get_le16(gb);
    bytestream2_skip(gb, 24);

    switch (fmt) {
    case  8: s->stride = FFALIGN(w, 4);
             s->bpp    = 1;
             fmt       = AV_PIX_FMT_PAL8; break;
    case 16: s->stride = w * 2;
             s->bpp    = 2;
             fmt       = AV_PIX_FMT_RGB555LE; break;
    case 32: s->stride = w * 4;
             s->bpp    = 4;
             fmt       = AV_PIX_FMT_BGR0; break;
    default: return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width   = w;
    avctx->height  = h;
    avctx->pix_fmt = fmt;

    ret = init_frames(avctx);
    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *pal = (uint32_t *)s->frame2->data[1];

        for (int i = 0; i < 256; i++)
            pal[i] = bytestream2_get_le32(gb) | 0xFF000000u;
    }

    return 0;
}

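/*
 * Shared zlib helper: inflates up to `size` compressed bytes from the
 * current packet position into the s->delta scratch buffer, which is grown
 * to `uncompressed_size` with av_fast_padded_malloc().
 */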
static int decode_zlib(AVCodecContext *avctx, AVPacket *avpkt,
                       unsigned size, unsigned uncompressed_size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int zret;

    zret = inflateReset(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    av_fast_padded_malloc(&s->delta, &s->delta_size, uncompressed_size);
    if (!s->delta)
        return AVERROR(ENOMEM);

    s->zstream.next_in  = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in = FFMIN(size, bytestream2_get_bytes_left(gb));

    s->zstream.next_out  = s->delta;
    s->zstream.avail_out = s->delta_size;

    zret = inflate(&s->zstream, Z_FINISH);
    if (zret != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR,
               "Inflate failed with return code: %d.\n", zret);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

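/*
 * MOVE chunk: a (possibly zlib-compressed) list of 16-byte move records.
 * Judging by the loop below, each record holds a type, a source rectangle
 * (start/end coordinates) and a second position; type 2 copies the region
 * from frame2 into frame1, type 1 clears it in frame2, and type 0 moves it
 * within frame2 via a temporary buffer.
 */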
static int decode_move(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext mc;
    unsigned pos, compression, nb_moves;
    unsigned uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    nb_moves = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 8);
    compression = bytestream2_get_le32(gb);

    if (nb_moves > INT32_MAX / 16 || nb_moves > avctx->width * avctx->height)
        return AVERROR_INVALIDDATA;

    uncompressed_size = 16 * nb_moves;

    if (compression == 1) {
        ret = decode_zlib(avctx, avpkt,
                          size - (bytestream2_tell(gb) - pos),
                          uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&mc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        bytestream2_init(&mc, avpkt->data + bytestream2_tell(gb),
                         bytestream2_get_bytes_left(gb));
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_bytes_left(&mc) < uncompressed_size)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < nb_moves; i++) {
        int type, start_x, start_y, end_x, end_y, mov_x, mov_y;
        uint8_t *e2, *b1, *b2;
        int w, h;

        type    = bytestream2_get_le16(&mc);
        start_x = bytestream2_get_le16(&mc);
        start_y = bytestream2_get_le16(&mc);
        end_x   = bytestream2_get_le16(&mc);
        end_y   = bytestream2_get_le16(&mc);
        mov_x   = bytestream2_get_le16(&mc);
        mov_y   = bytestream2_get_le16(&mc);
        bytestream2_skip(&mc, 2);

        if (start_x >= avctx->width || start_y >= avctx->height ||
            end_x   >= avctx->width || end_y   >= avctx->height ||
            mov_x   >= avctx->width || mov_y   >= avctx->height) {
            continue;
        }

        if (start_x >= end_x || start_y >= end_y)
            continue;

        w = end_x - start_x;
        h = end_y - start_y;

        if (mov_x + w > avctx->width || mov_y + h > avctx->height)
            continue;

        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        b1 = s->frame1->data[0] + s->frame1->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        b2 = s->frame2->data[0] + s->frame2->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        e2 = s->frame2->data[0] + s->frame2->linesize[0] * (mov_y + h - 1) + mov_x * s->bpp;

        if (type == 2) {
            for (int j = 0; j < h; j++) {
                memcpy(b1, b2, w * s->bpp);
                b1 -= s->frame1->linesize[0];
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 1) {
            for (int j = 0; j < h; j++) {
                memset(b2, 0, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 0) {
            uint8_t *buffer;

            av_fast_padded_malloc(&s->delta, &s->delta_size, w * h * s->bpp);
            buffer = s->delta;
            if (!buffer)
                return AVERROR(ENOMEM);

            for (int j = 0; j < h; j++) {
                memcpy(buffer + j * w * s->bpp, e2, w * s->bpp);
                e2 -= s->frame2->linesize[0];
            }

            for (int j = 0; j < h; j++) {
                memcpy(b2, buffer + j * w * s->bpp, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else {
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

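/*
 * DLTA chunk: run-length coded update of a rectangle (x, y, w, h), stored
 * raw or zlib-compressed. Each run is a type byte plus a length byte; the
 * opcodes in the switch below either skip ahead, swap data between the two
 * reference frames, or save the previous contents to frame1 and write a
 * fill value into frame2, walking the rectangle bottom-up byte- or
 * dword-wise. NEXT_LINE steps both frame pointers to the previous row once
 * the current row is exhausted.
 */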
#define NEXT_LINE                      \
    if (cx >= w * s->bpp) {            \
        cx = 0;                        \
        cy--;                          \
        b1 -= s->frame1->linesize[0];  \
        b2 -= s->frame2->linesize[0];  \
    }                                  \
    len--;

static int decode_dlta(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext dc;
    unsigned uncompressed_size, pos;
    unsigned x, y, w, h;
    int ret, cx, cy, compression;
    uint8_t *b1, *b2;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);
    x = bytestream2_get_le32(gb);
    y = bytestream2_get_le32(gb);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);

    if (x >= avctx->width || y >= avctx->height ||
        w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (x + w > avctx->width || y + h > avctx->height)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 4);
    compression = bytestream2_get_le32(gb);

    if (compression == 1) {
        if (w * h * s->bpp * 3 < uncompressed_size)
            return AVERROR_INVALIDDATA;
        ret = decode_zlib(avctx, avpkt, size, uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&dc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        if (bytestream2_get_bytes_left(gb) < uncompressed_size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&dc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    b1 = s->frame1->data[0] + s->frame1->linesize[0] * (y + h - 1) + x * s->bpp;
    b2 = s->frame2->data[0] + s->frame2->linesize[0] * (y + h - 1) + x * s->bpp;
    cx = 0, cy = h;
    while (bytestream2_get_bytes_left(&dc) > 0) {
        int type = bytestream2_get_byte(&dc);
        int len  = bytestream2_get_byte(&dc);
        unsigned fill;

        switch (type) {
        case 1:
            while (len > 0 && cy > 0) {
                cx++;
                NEXT_LINE
            }
            break;
        case 2:
            while (len > 0 && cy > 0) {
                int v0 = b1[cx];
                int v1 = b2[cx];

                b2[cx] = v0;
                b1[cx] = v1;
                cx++;
                NEXT_LINE
            }
            break;
        case 3:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_byte(&dc);
                b1[cx] = b2[cx];
                b2[cx] = fill;
                cx++;
                NEXT_LINE
            }
            break;
        case 4:
            fill = bytestream2_get_byte(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx++;
                NEXT_LINE
            }
            break;
        case 7:
            fill = bytestream2_get_le32(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 10:
            while (len > 0 && cy > 0) {
                cx += 4;
                NEXT_LINE
            }
            break;
        case 12:
            while (len > 0 && cy > 0) {
                unsigned v0, v1;

                v0 = AV_RL32(b2 + cx);
                v1 = AV_RL32(b1 + cx);
                AV_WL32(b2 + cx, v1);
                AV_WL32(b1 + cx, v0);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 13:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_le32(&dc);
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        default:
            avpriv_request_sample(avctx, "runlen %d", type);
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

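/*
 * KFRM chunk: a keyframe. An optional embedded FINT block (marker 0x65)
 * reinitializes the frame first; the remaining payload is a single zlib
 * stream that is inflated row by row, bottom-up, first into frame2 and
 * then into frame1.
 */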
static int decode_kfrm(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    uint8_t *dst;
    unsigned pos;
    int zret, ret;

    pos = bytestream2_tell(gb);
    if (bytestream2_peek_le32(gb) == 0x65) {
        ret = decode_fint(avctx, avpkt, size);
        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0])
        return AVERROR_INVALIDDATA;

    zret = inflateReset(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->zstream.next_in  = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in = bytestream2_get_bytes_left(gb);

    dst = s->frame2->data[0] + (avctx->height - 1) * s->frame2->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame2->linesize[0];
    }

    dst = s->frame1->data[0] + (avctx->height - 1) * s->frame1->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame1->linesize[0];
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

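/*
 * MOUS chunk: the cursor image, a zlib-compressed w x h bitmap of 3 bytes
 * per pixel, stored bottom-up; the first pixel apparently acts as the
 * transparency key (see draw_cursor() below).
 */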
static int decode_mous(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, pos, uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);

    if (w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (uncompressed_size != 3 * w * h)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->cursor, &s->cursor_size, uncompressed_size);
    if (!s->cursor)
        return AVERROR(ENOMEM);

    ret = decode_zlib(avctx, avpkt,
                      size - (bytestream2_tell(gb) - pos),
                      uncompressed_size);
    if (ret < 0)
        return ret;
    memcpy(s->cursor, s->delta, uncompressed_size);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    s->cursor_w = w;
    s->cursor_h = h;

    return 0;
}

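/* MPOS chunk: updates the stored cursor position. */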
static int decode_mpos(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned pos;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    s->cursor_x = bytestream2_get_le32(gb);
    s->cursor_y = bytestream2_get_le32(gb);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

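/*
 * Composites the stored RGB cursor bitmap onto the output frame at the
 * current cursor position, skipping pixels that match the key color. For
 * PAL8 output each cursor pixel is mapped to the nearest palette entry,
 * for RGB555LE it is packed down to 5 bits per channel, and for BGR0 the
 * channels are written directly.
 */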
static void draw_cursor(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst, *pal;

    if (!s->cursor)
        return;

    if (s->cursor_x >= avctx->width || s->cursor_y >= avctx->height)
        return;

    if (s->cursor_x + s->cursor_w > avctx->width ||
        s->cursor_y + s->cursor_h > avctx->height)
        return;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        pal = s->frame->data[1];
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];
                int best = INT_MAX;
                int index = 0;
                int dist;

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
                for (int k = 0; k < 256; k++) {
                    int pr = pal[k * 4 + 0];
                    int pg = pal[k * 4 + 1];
                    int pb = pal[k * 4 + 2];

                    dist = FFABS(cr - pr) + FFABS(cg - pg) + FFABS(cb - pb);
                    if (dist < best) {
                        best = dist;
                        index = k;
                    }
                }
                dst[0] = index;
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB555LE) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                cr >>= 3; cg >>= 3; cb >>= 3;
                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
                AV_WL16(dst, cr | cg << 5 | cb << 10);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_BGR0) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 4 * (s->cursor_x + j);
                dst[0] = cb;
                dst[1] = cg;
                dst[2] = cr;
            }
        }
    }
}

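/*
 * Top-level packet decoder: walks the chunk list, dispatching each tag to
 * the handlers above, then copies the reconstructed frame2 (plus palette
 * and, unless disabled, the cursor) into the output frame. A packet whose
 * first tag is EMPT produces no frame at all.
 */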
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int ret = 0, intra = 0;
    AVFrame *frame = data;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if (bytestream2_peek_le32(gb) == EMPT)
        return avpkt->size;

    s->frame = frame;

    while (bytestream2_get_bytes_left(gb) > 0) {
        unsigned type, size = 0;

        if (bytestream2_get_bytes_left(gb) < 8)
            return AVERROR_INVALIDDATA;

        type = bytestream2_get_le32(gb);
        if (type == KBND || type == BNDL) {
            intra = type == KBND;
            type = bytestream2_get_le32(gb);
        }

        size = bytestream2_get_le32(gb);
        if (size > bytestream2_get_bytes_left(gb))
            return AVERROR_INVALIDDATA;

        switch (type) {
        case FINT:
        case INIT:
            ret = decode_fint(avctx, avpkt, size);
            break;
        case KFRM:
            ret = decode_kfrm(avctx, avpkt, size);
            break;
        case DLTA:
            ret = decode_dlta(avctx, avpkt, size);
            break;
        case MOVE:
            ret = decode_move(avctx, avpkt, size);
            break;
        case MOUS:
            ret = decode_mous(avctx, avpkt, size);
            break;
        case MPOS:
            ret = decode_mpos(avctx, avpkt, size);
            break;
        default:
            bytestream2_skip(gb, size);
        }

        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    copy_plane(avctx, s->frame2, s->frame);
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->frame2->data[1], 1024);
    if (!s->skip_cursor)
        draw_cursor(avctx);

    s->frame->key_frame = intra;
    s->frame->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int zret;

    s->zstream.zalloc = Z_NULL;
    s->zstream.zfree  = Z_NULL;
    s->zstream.opaque = Z_NULL;
    zret = inflateInit(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->frame1 = av_frame_alloc();
    s->frame2 = av_frame_alloc();
    if (!s->frame1 || !s->frame2)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    av_freep(&s->cursor);
    s->cursor_size = 0;
    av_freep(&s->delta);
    s->delta_size = 0;
    av_frame_free(&s->frame1);
    av_frame_free(&s->frame2);
    inflateEnd(&s->zstream);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    clear_plane(avctx, s->frame1);
    clear_plane(avctx, s->frame2);
}

static const AVOption options[] = {
{ "skip_cursor", "skip the cursor", offsetof(RASCContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
{ NULL },
};

static const AVClass rasc_decoder_class = {
    .class_name = "rasc decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_rasc_decoder = {
    .name           = "rasc",
    .long_name      = NULL_IF_CONFIG_SMALL("RemotelyAnywhere Screen Capture"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RASC,
    .priv_data_size = sizeof(RASCContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &rasc_decoder_class,
};