/*
 * RemotelyAnywhere Screen Capture decoder
 *
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <string.h>

#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "zlib_wrapper.h"

#include <zlib.h>

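/*
 * Chunk tags found in the bitstream. Their roles are inferred from how
 * decode_frame() dispatches them: KBND/BNDL wrap a bundle of chunks (KBND
 * appears to mark a key-frame bundle), FINT/INIT carry frame setup, KFRM a
 * zlib-compressed key frame, DLTA a run-length delta, MOVE a list of block
 * copy/clear operations, MOUS/MPOS the cursor image and position, and EMPT
 * an empty packet.
 */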
#define KBND MKTAG('K', 'B', 'N', 'D')
#define FINT MKTAG('F', 'I', 'N', 'T')
#define INIT MKTAG('I', 'N', 'I', 'T')
#define BNDL MKTAG('B', 'N', 'D', 'L')
#define KFRM MKTAG('K', 'F', 'R', 'M')
#define DLTA MKTAG('D', 'L', 'T', 'A')
#define MOUS MKTAG('M', 'O', 'U', 'S')
#define MPOS MKTAG('M', 'P', 'O', 'S')
#define MOVE MKTAG('M', 'O', 'V', 'E')
#define EMPT MKTAG('E', 'M', 'P', 'T')

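/*
 * Decoder state: two persistent reference frames (frame1/frame2) updated in
 * place, scratch buffers for zlib output (delta) and the cursor bitmap
 * (cursor), plus the geometry derived from the FINT chunk.
 */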
typedef struct RASCContext {
    AVClass        *class;
    int             skip_cursor;
    GetByteContext  gb;
    uint8_t        *delta;
    int             delta_size;
    uint8_t        *cursor;
    int             cursor_size;
    unsigned        cursor_w;
    unsigned        cursor_h;
    unsigned        cursor_x;
    unsigned        cursor_y;
    int             stride;
    int             bpp;
    AVFrame        *frame;
    AVFrame        *frame1;
    AVFrame        *frame2;
    FFZStream       zstream;
} RASCContext;

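/* Zero the visible area of the frame's first data plane. */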
static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst = frame->data[0];

    if (!dst)
        return;

    for (int y = 0; y < avctx->height; y++) {
        memset(dst, 0, avctx->width * s->bpp);
        dst += frame->linesize[0];
    }
}

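/* Copy the first data plane from src to dst, s->stride bytes per line. */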
static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *srcp = src->data[0];
    uint8_t *dstp = dst->data[0];

    for (int y = 0; y < avctx->height; y++) {
        memcpy(dstp, srcp, s->stride);
        srcp += src->linesize[0];
        dstp += dst->linesize[0];
    }
}

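/* (Re)allocate the two reference frames and clear them to black. */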
static int init_frames(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int ret;

    av_frame_unref(s->frame1);
    av_frame_unref(s->frame2);
    if ((ret = ff_get_buffer(avctx, s->frame1, 0)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, s->frame2, 0)) < 0)
        return ret;

    clear_plane(avctx, s->frame2);
    clear_plane(avctx, s->frame1);

    return 0;
}

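/*
 * FINT/INIT chunk: frame setup. A leading value of 0x65 introduces a full
 * header with width, height and bits per pixel (8/16/32, mapped to PAL8,
 * RGB555LE and BGR0), followed for 8 bpp by a 256-entry palette; any other
 * leading value merely clears both reference frames.
 */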
static int decode_fint(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, fmt;
    int ret;

    if (bytestream2_peek_le32(gb) != 0x65) {
        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        clear_plane(avctx, s->frame2);
        clear_plane(avctx, s->frame1);
        return 0;
    }
    if (bytestream2_get_bytes_left(gb) < 72)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 30);
    fmt = bytestream2_get_le16(gb);
    bytestream2_skip(gb, 24);

    switch (fmt) {
    case  8: s->stride = FFALIGN(w, 4);
             s->bpp    = 1;
             fmt       = AV_PIX_FMT_PAL8; break;
    case 16: s->stride = w * 2;
             s->bpp    = 2;
             fmt       = AV_PIX_FMT_RGB555LE; break;
    case 32: s->stride = w * 4;
             s->bpp    = 4;
             fmt       = AV_PIX_FMT_BGR0; break;
    default: return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width   = w;
    avctx->height  = h;
    avctx->pix_fmt = fmt;

    ret = init_frames(avctx);
    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *pal = (uint32_t *)s->frame2->data[1];

        for (int i = 0; i < 256; i++)
            pal[i] = bytestream2_get_le32(gb) | 0xFF000000u;
    }

    return 0;
}

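/*
 * Inflate up to `size` bytes from the current packet position into the
 * s->delta scratch buffer, which is grown to hold `uncompressed_size` bytes.
 */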
static int decode_zlib(AVCodecContext *avctx, const AVPacket *avpkt,
                       unsigned size, unsigned uncompressed_size)
{
    RASCContext *s = avctx->priv_data;
    z_stream *const zstream = &s->zstream.zstream;
    GetByteContext *gb = &s->gb;
    int zret;

    zret = inflateReset(zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    av_fast_padded_malloc(&s->delta, &s->delta_size, uncompressed_size);
    if (!s->delta)
        return AVERROR(ENOMEM);

    zstream->next_in  = avpkt->data + bytestream2_tell(gb);
    zstream->avail_in = FFMIN(size, bytestream2_get_bytes_left(gb));

    zstream->next_out  = s->delta;
    zstream->avail_out = s->delta_size;

    zret = inflate(zstream, Z_FINISH);
    if (zret != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR,
               "Inflate failed with return code: %d.\n", zret);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

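/*
 * MOVE chunk: a (possibly zlib-compressed) list of 16-byte records, each
 * describing a rectangle and an operation. Type 2 copies the rectangle from
 * frame2 into frame1, type 1 clears it in frame2, and type 0 copies the
 * rectangle at (mov_x, mov_y) to (start_x, start_y) within frame2 via a
 * temporary buffer. Rectangles that fall outside the picture are skipped.
 */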
static int decode_move(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext mc;
    unsigned pos, compression, nb_moves;
    unsigned uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    nb_moves = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 8);
    compression = bytestream2_get_le32(gb);

    if (nb_moves > INT32_MAX / 16 || nb_moves > avctx->width * avctx->height)
        return AVERROR_INVALIDDATA;

    uncompressed_size = 16 * nb_moves;

    if (compression == 1) {
        ret = decode_zlib(avctx, avpkt,
                          size - (bytestream2_tell(gb) - pos),
                          uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&mc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        bytestream2_init(&mc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_bytes_left(&mc) < uncompressed_size)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < nb_moves; i++) {
        int type, start_x, start_y, end_x, end_y, mov_x, mov_y;
        uint8_t *e2, *b1, *b2;
        int w, h;

        type    = bytestream2_get_le16(&mc);
        start_x = bytestream2_get_le16(&mc);
        start_y = bytestream2_get_le16(&mc);
        end_x   = bytestream2_get_le16(&mc);
        end_y   = bytestream2_get_le16(&mc);
        mov_x   = bytestream2_get_le16(&mc);
        mov_y   = bytestream2_get_le16(&mc);
        bytestream2_skip(&mc, 2);

        if (start_x >= avctx->width || start_y >= avctx->height ||
            end_x   >= avctx->width || end_y   >= avctx->height ||
            mov_x   >= avctx->width || mov_y   >= avctx->height) {
            continue;
        }

        if (start_x >= end_x || start_y >= end_y)
            continue;

        w = end_x - start_x;
        h = end_y - start_y;

        if (mov_x + w > avctx->width || mov_y + h > avctx->height)
            continue;

        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        b1 = s->frame1->data[0] + s->frame1->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        b2 = s->frame2->data[0] + s->frame2->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        e2 = s->frame2->data[0] + s->frame2->linesize[0] * (mov_y + h - 1) + mov_x * s->bpp;

        if (type == 2) {
            for (int j = 0; j < h; j++) {
                memcpy(b1, b2, w * s->bpp);
                b1 -= s->frame1->linesize[0];
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 1) {
            for (int j = 0; j < h; j++) {
                memset(b2, 0, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 0) {
            uint8_t *buffer;

            av_fast_padded_malloc(&s->delta, &s->delta_size, w * h * s->bpp);
            buffer = s->delta;
            if (!buffer)
                return AVERROR(ENOMEM);

            for (int j = 0; j < h; j++) {
                memcpy(buffer + j * w * s->bpp, e2, w * s->bpp);
                e2 -= s->frame2->linesize[0];
            }

            for (int j = 0; j < h; j++) {
                memcpy(b2, buffer + j * w * s->bpp, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else {
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

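/* Advance the run-length scan: when cx passes the end of a row, wrap to the
 * next row above (the rectangle is processed bottom-up) and always consume
 * one unit of the current run. */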
#define NEXT_LINE                     \
    if (cx >= w * s->bpp) {           \
        cx = 0;                       \
        cy--;                         \
        b1 -= s->frame1->linesize[0]; \
        b2 -= s->frame2->linesize[0]; \
    }                                 \
    len--;

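/*
 * DLTA chunk: a run-length coded update of a rectangle (x, y, w, h). Each
 * run is a (type, length) byte pair; depending on the type the run skips
 * pixels, swaps data between frame1 and frame2, or copies the old frame2
 * data into frame1 and writes new (literal or repeated) values into frame2.
 * Some opcodes operate on single bytes, others on 32-bit words.
 */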
static int decode_dlta(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext dc;
    unsigned uncompressed_size, pos;
    unsigned x, y, w, h;
    int ret, cx, cy, compression;
    uint8_t *b1, *b2;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);
    x = bytestream2_get_le32(gb);
    y = bytestream2_get_le32(gb);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);

    if (x >= avctx->width || y >= avctx->height ||
        w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (x + w > avctx->width || y + h > avctx->height)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 4);
    compression = bytestream2_get_le32(gb);

    if (compression == 1) {
        if (w * h * s->bpp * 3 < uncompressed_size)
            return AVERROR_INVALIDDATA;
        ret = decode_zlib(avctx, avpkt, size, uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&dc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        if (bytestream2_get_bytes_left(gb) < uncompressed_size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&dc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    b1 = s->frame1->data[0] + s->frame1->linesize[0] * (int)(y + h - 1) + ((int)x) * s->bpp;
    b2 = s->frame2->data[0] + s->frame2->linesize[0] * (int)(y + h - 1) + ((int)x) * s->bpp;
    cx = 0, cy = h;
    while (bytestream2_get_bytes_left(&dc) > 0) {
        int type = bytestream2_get_byte(&dc);
        int len = bytestream2_get_byte(&dc);
        unsigned fill;

        switch (type) {
        case 1:
            while (len > 0 && cy > 0) {
                cx++;
                NEXT_LINE
            }
            break;
        case 2:
            while (len > 0 && cy > 0) {
                int v0 = b1[cx];
                int v1 = b2[cx];

                b2[cx] = v0;
                b1[cx] = v1;
                cx++;
                NEXT_LINE
            }
            break;
        case 3:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_byte(&dc);
                b1[cx] = b2[cx];
                b2[cx] = fill;
                cx++;
                NEXT_LINE
            }
            break;
        case 4:
            fill = bytestream2_get_byte(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx++;
                NEXT_LINE
            }
            break;
        case 7:
            fill = bytestream2_get_le32(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 10:
            while (len > 0 && cy > 0) {
                cx += 4;
                NEXT_LINE
            }
            break;
        case 12:
            while (len > 0 && cy > 0) {
                unsigned v0, v1;

                v0 = AV_RL32(b2 + cx);
                v1 = AV_RL32(b1 + cx);
                AV_WL32(b2 + cx, v1);
                AV_WL32(b1 + cx, v0);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 13:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_le32(&dc);
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        default:
            avpriv_request_sample(avctx, "runlen %d", type);
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

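/*
 * KFRM chunk: a zlib-compressed key frame. An optional embedded FINT header
 * (signalled by a leading 0x65) is parsed first; the compressed stream then
 * holds both reference frames back to back, each stored bottom-up one line
 * at a time.
 */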
static int decode_kfrm(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    z_stream *const zstream = &s->zstream.zstream;
    GetByteContext *gb = &s->gb;
    uint8_t *dst;
    unsigned pos;
    int zret, ret;

    pos = bytestream2_tell(gb);
    if (bytestream2_peek_le32(gb) == 0x65) {
        ret = decode_fint(avctx, avpkt, size);
        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0])
        return AVERROR_INVALIDDATA;

    zret = inflateReset(zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    zstream->next_in  = avpkt->data + bytestream2_tell(gb);
    zstream->avail_in = bytestream2_get_bytes_left(gb);

    dst = s->frame2->data[0] + (avctx->height - 1) * s->frame2->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        zstream->next_out  = dst;
        zstream->avail_out = s->stride;

        zret = inflate(zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame2->linesize[0];
    }

    dst = s->frame1->data[0] + (avctx->height - 1) * s->frame1->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        zstream->next_out  = dst;
        zstream->avail_out = s->stride;

        zret = inflate(zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame1->linesize[0];
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

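/*
 * MOUS chunk: the cursor bitmap, stored as zlib-compressed 24-bit RGB
 * (3 * w * h bytes), decompressed into s->cursor.
 */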
static int decode_mous(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, pos, uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);

    if (w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (uncompressed_size != 3 * w * h)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->cursor, &s->cursor_size, uncompressed_size);
    if (!s->cursor)
        return AVERROR(ENOMEM);

    ret = decode_zlib(avctx, avpkt,
                      size - (bytestream2_tell(gb) - pos),
                      uncompressed_size);
    if (ret < 0)
        return ret;
    memcpy(s->cursor, s->delta, uncompressed_size);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    s->cursor_w = w;
    s->cursor_h = h;

    return 0;
}

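/* MPOS chunk: update the cursor position. */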
static int decode_mpos(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned pos;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    s->cursor_x = bytestream2_get_le32(gb);
    s->cursor_y = bytestream2_get_le32(gb);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

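/*
 * Blend the cursor bitmap into the output frame at the current position.
 * The bitmap is stored bottom-up; pixels matching its first pixel are
 * treated as transparent. For PAL8 output each cursor pixel is mapped to
 * the nearest palette entry; for RGB555LE and BGR0 it is converted directly.
 */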
static void draw_cursor(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst, *pal;

    if (!s->cursor)
        return;

    if (s->cursor_x >= avctx->width || s->cursor_y >= avctx->height)
        return;

    if (s->cursor_x + s->cursor_w > avctx->width ||
        s->cursor_y + s->cursor_h > avctx->height)
        return;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        pal = s->frame->data[1];
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];
                int best = INT_MAX;
                int index = 0;
                int dist;

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (int)(s->cursor_y + i) + (int)(s->cursor_x + j);
                for (int k = 0; k < 256; k++) {
                    int pr = pal[k * 4 + 0];
                    int pg = pal[k * 4 + 1];
                    int pb = pal[k * 4 + 2];

                    dist = FFABS(cr - pr) + FFABS(cg - pg) + FFABS(cb - pb);
                    if (dist < best) {
                        best = dist;
                        index = k;
                    }
                }
                dst[0] = index;
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB555LE) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                cr >>= 3; cg >>= 3; cb >>= 3;
                dst = s->frame->data[0] + s->frame->linesize[0] * (int)(s->cursor_y + i) + 2 * (s->cursor_x + j);
                AV_WL16(dst, cr | cg << 5 | cb << 10);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_BGR0) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (int)(s->cursor_y + i) + 4 * (s->cursor_x + j);
                dst[0] = cb;
                dst[1] = cg;
                dst[2] = cr;
            }
        }
    }
}

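/*
 * Packet decoding entry point: walk the chunk list, dispatch each chunk to
 * its handler, then emit a copy of frame2 (with the cursor drawn on top
 * unless skip_cursor is set). KBND bundles mark the result as a key frame.
 */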
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int ret, intra = 0;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if (bytestream2_peek_le32(gb) == EMPT)
        return avpkt->size;

    s->frame = frame;

    while (bytestream2_get_bytes_left(gb) > 0) {
        unsigned type, size = 0;

        if (bytestream2_get_bytes_left(gb) < 8)
            return AVERROR_INVALIDDATA;

        type = bytestream2_get_le32(gb);
        if (type == KBND || type == BNDL) {
            intra = type == KBND;
            type = bytestream2_get_le32(gb);
        }

        size = bytestream2_get_le32(gb);
        if (bytestream2_get_bytes_left(gb) < size)
            return AVERROR_INVALIDDATA;

        switch (type) {
        case FINT:
        case INIT:
            ret = decode_fint(avctx, avpkt, size);
            break;
        case KFRM:
            ret = decode_kfrm(avctx, avpkt, size);
            break;
        case DLTA:
            ret = decode_dlta(avctx, avpkt, size);
            break;
        case MOVE:
            ret = decode_move(avctx, avpkt, size);
            break;
        case MOUS:
            ret = decode_mous(avctx, avpkt, size);
            break;
        case MPOS:
            ret = decode_mpos(avctx, avpkt, size);
            break;
        default:
            bytestream2_skip(gb, size);
            ret = 0;
        }

        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    copy_plane(avctx, s->frame2, s->frame);
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->frame2->data[1], 1024);
    if (!s->skip_cursor)
        draw_cursor(avctx);

    if (intra)
        s->frame->flags |= AV_FRAME_FLAG_KEY;
    else
        s->frame->flags &= ~AV_FRAME_FLAG_KEY;
    s->frame->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    s->frame1 = av_frame_alloc();
    s->frame2 = av_frame_alloc();
    if (!s->frame1 || !s->frame2)
        return AVERROR(ENOMEM);

    return ff_inflate_init(&s->zstream, avctx);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    av_freep(&s->cursor);
    s->cursor_size = 0;
    av_freep(&s->delta);
    s->delta_size = 0;
    av_frame_free(&s->frame1);
    av_frame_free(&s->frame2);
    ff_inflate_end(&s->zstream);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    clear_plane(avctx, s->frame1);
    clear_plane(avctx, s->frame2);
}

static const AVOption options[] = {
{ "skip_cursor", "skip the cursor", offsetof(RASCContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
{ NULL },
};

static const AVClass rasc_decoder_class = {
    .class_name = "rasc decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_rasc_decoder = {
    .p.name         = "rasc",
    CODEC_LONG_NAME("RemotelyAnywhere Screen Capture"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_RASC,
    .priv_data_size = sizeof(RASCContext),
    .init           = decode_init,
    .close          = decode_close,
    FF_CODEC_DECODE_CB(decode_frame),
    .flush          = decode_flush,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .p.priv_class   = &rasc_decoder_class,
};