FFmpeg
dxtory.c
/*
 * Dxtory decoder
 *
 * Copyright (c) 2011 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "unary.h"
#include "thread.h"

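/* Size in bytes that an uncompressed frame of the given pixel format
 * occupies in the packet payload; returns 0 for unsupported formats. */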
static int64_t get_raw_size(enum AVPixelFormat fmt, int width, int height)
{
    switch (fmt) {
    case AV_PIX_FMT_RGB555LE:
    case AV_PIX_FMT_RGB565LE:
        return width * height * 2LL;
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_YUV444P:
        return width * height * 3LL;
    case AV_PIX_FMT_YUV420P:
        return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 1) * AV_CEIL_RSHIFT(height, 1);
    case AV_PIX_FMT_YUV410P:
        return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 2) * AV_CEIL_RSHIFT(height, 2);
    }

    return 0;
}

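/*
 * Point each plane at its last row and negate the stride so the decoders
 * below effectively write the image bottom-up.  The function is called a
 * second time after decoding to restore the plane pointers and line sizes
 * for the caller.
 */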
static void do_vflip(AVCodecContext *avctx, AVFrame *pic, int vflip)
{
    if (!vflip)
        return;

    switch (pic->format) {
    case AV_PIX_FMT_YUV444P:
        pic->data[1] += (avctx->height - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (avctx->height - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        /* fall through: the luma plane is flipped below */
    case AV_PIX_FMT_RGB555LE:
    case AV_PIX_FMT_RGB565LE:
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB24:
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        break;
    case AV_PIX_FMT_YUV410P:
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    case AV_PIX_FMT_YUV420P:
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    }
}

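/* "v1" (uncompressed) packed formats: the payload is simply raw lines of
 * bpp bytes per pixel, copied into the frame row by row. */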
static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                int id, int bpp, uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h;
    uint8_t *dst;
    int ret;

    if (src_size < get_raw_size(id, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = id;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    dst = pic->data[0];
    for (h = 0; h < avctx->height; h++) {
        memcpy(dst, src, avctx->width * bpp);
        src += avctx->width * bpp;
        dst += pic->linesize[0];
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}

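/*
 * "v1" (uncompressed) 4:1:0: the payload stores 4x4 luma blocks followed by
 * one U and one V sample each (18 bytes per block); narrower blocks at the
 * right edge and shorter blocks at the bottom edge are stored separately.
 * Chroma appears to be stored as signed values and is recentred with +0x80.
 */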
static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h, w;
    uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV410P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV410P;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    height    = avctx->height & ~3;
    width     = avctx->width  & ~3;
    hmargin   = avctx->width  - width;
    vmargin   = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 2) - 1;

    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    Y3 = pic->data[0] + pic->linesize[0] * 2;
    Y4 = pic->data[0] + pic->linesize[0] * 3;
    U  = pic->data[1];
    V  = pic->data[2];
    for (h = 0; h < height; h += 4) {
        for (w = 0; w < width; w += 4) {
            AV_COPY32U(Y1 + w, src);
            AV_COPY32U(Y2 + w, src + 4);
            AV_COPY32U(Y3 + w, src + 8);
            AV_COPY32U(Y4 + w, src + 12);
            U[w >> 2] = src[16] + 0x80;
            V[w >> 2] = src[17] + 0x80;
            src += 18;
        }
        if (hmargin) {
            for (w = 0; w < hmargin; w++) {
                Y1[width + w] = src[w];
                Y2[width + w] = src[w + hmargin * 1];
                Y3[width + w] = src[w + hmargin * 2];
                Y4[width + w] = src[w + hmargin * 3];
            }
            src += 4 * hmargin;
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
        Y1 += pic->linesize[0] * 4;
        Y2 += pic->linesize[0] * 4;
        Y3 += pic->linesize[0] * 4;
        Y4 += pic->linesize[0] * 4;
        U  += pic->linesize[1];
        V  += pic->linesize[2];
    }

    if (vmargin) {
        for (w = 0; w < width; w += 4) {
            AV_COPY32U(Y1 + w, src);
            if (vmargin > 1)
                AV_COPY32U(Y2 + w, src + 4);
            if (vmargin > 2)
                AV_COPY32U(Y3 + w, src + 8);
            src += 4 * vmargin;
            U[w >> 2] = src[0] + 0x80;
            V[w >> 2] = src[1] + 0x80;
            src += 2;
        }
        if (hmargin) {
            for (w = 0; w < hmargin; w++) {
                AV_COPY32U(Y1 + w, src);
                if (vmargin > 1)
                    AV_COPY32U(Y2 + w, src + 4);
                if (vmargin > 2)
                    AV_COPY32U(Y3 + w, src + 8);
                src += 4 * vmargin;
            }
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}

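/* "v1" (uncompressed) 4:2:0: as above, but with 2x2 luma blocks plus one U
 * and one V sample (6 bytes per block). */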
static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h, w;
    uint8_t *Y1, *Y2, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV420P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    height    = avctx->height & ~1;
    width     = avctx->width  & ~1;
    hmargin   = avctx->width  - width;
    vmargin   = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 1) - 1;

    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    U  = pic->data[1];
    V  = pic->data[2];
    for (h = 0; h < height; h += 2) {
        for (w = 0; w < width; w += 2) {
            AV_COPY16(Y1 + w, src);
            AV_COPY16(Y2 + w, src + 2);
            U[w >> 1] = src[4] + 0x80;
            V[w >> 1] = src[5] + 0x80;
            src += 6;
        }
        if (hmargin) {
            Y1[width + 1] = src[0];
            Y2[width + 1] = src[1];
            U[huvborder] = src[2] + 0x80;
            V[huvborder] = src[3] + 0x80;
            src += 4;
        }
        Y1 += pic->linesize[0] * 2;
        Y2 += pic->linesize[0] * 2;
        U  += pic->linesize[1];
        V  += pic->linesize[2];
    }

    if (vmargin) {
        for (w = 0; w < width; w += 2) {
            AV_COPY16U(Y1 + w, src);
            U[w >> 1] = src[0] + 0x80;
            V[w >> 1] = src[1] + 0x80;
            src += 4;
        }
        if (hmargin) {
            Y1[w] = src[0];
            U[huvborder] = src[1] + 0x80;
            V[huvborder] = src[2] + 0x80;
            src += 3;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}

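/* "v1" (uncompressed) 4:4:4: three bytes per pixel (Y, U, V), with the
 * chroma samples stored with their sign bit flipped. */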
static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h, w;
    uint8_t *Y, *U, *V;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV444P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV444P;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    Y = pic->data[0];
    U = pic->data[1];
    V = pic->data[2];
    for (h = 0; h < avctx->height; h++) {
        for (w = 0; w < avctx->width; w++) {
            Y[w] = *src++;
            U[w] = *src++ ^ 0x80;
            V[w] = *src++ ^ 0x80;
        }
        Y += pic->linesize[0];
        U += pic->linesize[1];
        V += pic->linesize[2];
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}

static const uint8_t def_lru[8]     = { 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xFF };
static const uint8_t def_lru_555[8] = { 0x00, 0x08, 0x10, 0x18, 0x1F };
static const uint8_t def_lru_565[8] = { 0x00, 0x08, 0x10, 0x20, 0x30, 0x3F };

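/*
 * Decode one 8-bit symbol.  A unary-coded index selects one of the eight
 * most recently used values; a zero index means a literal 8-bit value
 * follows.  The decoded value is moved to the front of the LRU list.
 */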
static inline uint8_t decode_sym(GetBitContext *gb, uint8_t lru[8])
{
    uint8_t c, val;

    c = get_unary(gb, 0, 8);
    if (!c) {
        val = get_bits(gb, 8);
        memmove(lru + 1, lru, sizeof(*lru) * (8 - 1));
    } else {
        val = lru[c - 1];
        memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
    }
    lru[0] = val;

    return val;
}

static int check_slice_size(AVCodecContext *avctx,
                            const uint8_t *src, int src_size,
                            int slice_size, int off)
{
    int cur_slice_size;

    if (slice_size > src_size - off) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice size %d (only %d bytes left)\n",
               slice_size, src_size - off);
        return AVERROR_INVALIDDATA;
    }
    if (slice_size <= 16) {
        av_log(avctx, AV_LOG_ERROR, "invalid slice size %d\n",
               slice_size);
        return AVERROR_INVALIDDATA;
    }

    cur_slice_size = AV_RL32(src + off);
    if (cur_slice_size != slice_size - 16) {
        av_log(avctx, AV_LOG_ERROR,
               "Slice sizes mismatch: got %d instead of %d\n",
               cur_slice_size, slice_size - 16);
    }

    return 0;
}

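/* Parse the compressed-frame header: a little-endian 16-bit slice count
 * followed by per-slice sizes; *off is the 16-byte-aligned offset at which
 * the slice data starts. */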
static int load_buffer(AVCodecContext *avctx,
                       const uint8_t *src, int src_size,
                       GetByteContext *gb,
                       int *nslices, int *off)
{
    bytestream2_init(gb, src, src_size);
    *nslices = bytestream2_get_le16(gb);
    *off = FFALIGN(*nslices * 4 + 2, 16);
    if (src_size < *off) {
        av_log(avctx, AV_LOG_ERROR, "no slice data\n");
        return AVERROR_INVALIDDATA;
    }

    if (!*nslices) {
        avpriv_request_sample(avctx, "%d slices for %dx%d", *nslices,
                              avctx->width, avctx->height);
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

static inline uint8_t decode_sym_565(GetBitContext *gb, uint8_t lru[8],
                                     int bits)
{
    uint8_t c, val;

    c = get_unary(gb, 0, bits);
    if (!c) {
        val = get_bits(gb, bits);
        memmove(lru + 1, lru, sizeof(*lru) * (6 - 1));
    } else {
        val = lru[c - 1];
        memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
    }
    lru[0] = val;

    return val;
}

typedef int (*decode_slice_func)(GetBitContext *gb, AVFrame *frame,
                                 int line, int height, uint8_t lru[3][8]);

typedef void (*setup_lru_func)(uint8_t lru[3][8]);

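/*
 * Common driver for the compressed ("v2") modes.  The frame is split into
 * horizontal slices; each slice record is a 16-byte header (whose first
 * 32 bits hold the payload size) followed by a bitstream that the
 * format-specific decode_slice() callback consumes.  Slice sizes are
 * sanity-checked up front so obviously truncated frames are rejected before
 * a frame buffer is allocated.
 */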
static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
                            const uint8_t *src, int src_size,
                            decode_slice_func decode_slice,
                            setup_lru_func setup_lru,
                            enum AVPixelFormat fmt,
                            uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    GetByteContext gb, gb_check;
    GetBitContext  gb2;
    int nslices, slice, line = 0;
    uint32_t off, slice_size;
    uint64_t off_check;
    uint8_t lru[3][8];
    int ret;

    ret = load_buffer(avctx, src, src_size, &gb, &nslices, &off);
    if (ret < 0)
        return ret;

    off_check = off;
    gb_check  = gb;
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb_check);

        if (slice_size <= 16 + (avctx->height * avctx->width / (8 * nslices)))
            return AVERROR_INVALIDDATA;
        off_check += slice_size;
    }

    if (off_check - avctx->discard_damaged_percentage * off_check / 100 > src_size)
        return AVERROR_INVALIDDATA;

    avctx->pix_fmt = fmt;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb);

        setup_lru(lru);

        ret = check_slice_size(avctx, src, src_size, slice_size, off);
        if (ret < 0)
            return ret;

        if ((ret = init_get_bits8(&gb2, src + off + 16, slice_size - 16)) < 0)
            return ret;

        line += decode_slice(&gb2, pic, line, avctx->height - line, lru);

        off += slice_size;
    }

    if (avctx->height - line) {
        avpriv_request_sample(avctx, "Not enough slice data available");
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}

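/* Decode a slice of 15/16-bit RGB symbols and expand them to 8 bits per
 * component; is_565 selects a 6-bit green channel.  Returns the number of
 * lines actually decoded. */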
static av_always_inline int dx2_decode_slice_5x5(GetBitContext *gb, AVFrame *frame,
                                                 int line, int left, uint8_t lru[3][8],
                                                 int is_565)
{
    int x, y;
    int r, g, b;
    int width  = frame->width;
    int stride = frame->linesize[0];
    uint8_t *dst = frame->data[0] + stride * line;

    for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
        for (x = 0; x < width; x++) {
            b = decode_sym_565(gb, lru[0], 5);
            g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
            r = decode_sym_565(gb, lru[2], 5);
            dst[x * 3 + 0] = (r << 3) | (r >> 2);
            dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
            dst[x * 3 + 2] = (b << 3) | (b >> 2);
        }

        dst += stride;
    }

    return y;
}

static void setup_lru_555(uint8_t lru[3][8])
{
    memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
    memcpy(lru[1], def_lru_555, 8 * sizeof(*def_lru));
    memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
}

static void setup_lru_565(uint8_t lru[3][8])
{
    memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
    memcpy(lru[1], def_lru_565, 8 * sizeof(*def_lru));
    memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
}

static int dx2_decode_slice_555(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 0);
}

static int dx2_decode_slice_565(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 1);
}

static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size, int is_565,
                                uint32_t vflipped)
{
    enum AVPixelFormat fmt = AV_PIX_FMT_RGB24;
    if (is_565)
        return dxtory_decode_v2(avctx, pic, src, src_size,
                                dx2_decode_slice_565,
                                setup_lru_565,
                                fmt, vflipped);
    else
        return dxtory_decode_v2(avctx, pic, src, src_size,
                                dx2_decode_slice_555,
                                setup_lru_555,
                                fmt, vflipped);
}

static int dx2_decode_slice_rgb(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    int x, y;
    int width  = frame->width;
    int stride = frame->linesize[0];
    uint8_t *dst = frame->data[0] + stride * line;

    for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
        for (x = 0; x < width; x++) {
            dst[x * 3 + 0] = decode_sym(gb, lru[0]);
            dst[x * 3 + 1] = decode_sym(gb, lru[1]);
            dst[x * 3 + 2] = decode_sym(gb, lru[2]);
        }

        dst += stride;
    }

    return y;
}

static void default_setup_lru(uint8_t lru[3][8])
{
    int i;

    for (i = 0; i < 3; i++)
        memcpy(lru[i], def_lru, 8 * sizeof(*def_lru));
}

static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_rgb,
                            default_setup_lru,
                            AV_PIX_FMT_BGR24, vflipped);
}

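/* Compressed 4:1:0 slice: 4x4 luma blocks plus one U/V pair per block,
 * coded with the shared LRU symbol coder; right/bottom edge blocks are
 * handled the same way as in the uncompressed path. */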
static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y, i, j;
    int width = frame->width;

    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];

    uint8_t *Y = frame->data[0] + ystride * line;
    uint8_t *U = frame->data[1] + (ustride >> 2) * line;
    uint8_t *V = frame->data[2] + (vstride >> 2) * line;

    int h, w, hmargin, vmargin;
    int huvborder;

    h = frame->height & ~3;
    w = frame->width  & ~3;
    hmargin = frame->width  - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 2) - 1;

    for (y = 0; y < left - 3 && get_bits_left(gb) >= 18 * w / 4 + hmargin * 4 + (!!hmargin * 2); y += 4) {
        for (x = 0; x < w; x += 4) {
            for (j = 0; j < 4; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            for (j = 0; j < 4; j++)
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }

        Y += ystride * 4;
        U += ustride;
        V += vstride;
    }

    if (vmargin && y + vmargin == left) {
        for (x = 0; x < width; x += 4) {
            for (j = 0; j < vmargin; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            for (j = 0; j < vmargin; j++) {
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            }
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }

        y += vmargin;
    }

    return y;
}


static int dxtory_decode_v2_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_410,
                            default_setup_lru,
                            AV_PIX_FMT_YUV410P, vflipped);
}

static int dx2_decode_slice_420(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y;

    int width = frame->width;

    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];

    uint8_t *Y = frame->data[0] + ystride * line;
    uint8_t *U = frame->data[1] + (ustride >> 1) * line;
    uint8_t *V = frame->data[2] + (vstride >> 1) * line;

    int h, w, hmargin, vmargin;
    int huvborder;

    h = frame->height & ~1;
    w = frame->width  & ~1;
    hmargin = frame->width  - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 1) - 1;

    for (y = 0; y < left - 1 && get_bits_left(gb) >= 3 * w + hmargin * 4; y += 2) {
        for (x = 0; x < w; x += 2) {
            Y[x + 0 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 0 + 1 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 1 * ystride] = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            Y[x + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }

        Y += ystride * 2;
        U += ustride;
        V += vstride;
    }

    if (vmargin) {
        for (x = 0; x < width; x += 2) {
            Y[x + 0]  = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            Y[x] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
    }

    return y;
}

static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_420,
                            default_setup_lru,
                            AV_PIX_FMT_YUV420P, vflipped);
}

static int dx2_decode_slice_444(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y;

    int width = frame->width;

    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];

    uint8_t *Y = frame->data[0] + ystride * line;
    uint8_t *U = frame->data[1] + ustride * line;
    uint8_t *V = frame->data[2] + vstride * line;

    for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
        for (x = 0; x < width; x++) {
            Y[x] = decode_sym(gb, lru[0]);
            U[x] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x] = decode_sym(gb, lru[2]) ^ 0x80;
        }

        Y += ystride;
        U += ustride;
        V += vstride;
    }

    return y;
}

static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_444,
                            default_setup_lru,
                            AV_PIX_FMT_YUV444P, vflipped);
}

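/*
 * Every packet starts with a 16-byte header whose first 32 bits select the
 * coding mode: the high byte encodes the pixel format, bit 5 apparently
 * marks vertically flipped content, and bit 3 seems to select the
 * compressed ("v2") variants over the raw ones.
 */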
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *pic = data;
    const uint8_t *src = avpkt->data;
    uint32_t type;
    int vflipped, ret;

    if (avpkt->size < 16) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    type = AV_RB32(src);
    vflipped = !!(type & 0x20);

    switch (type) {
    case 0x01000021:
    case 0x01000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_BGR24, 3, vflipped);
        break;
    case 0x01000029:
    case 0x01000009:
        ret = dxtory_decode_v2_rgb(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000021:
    case 0x02000001:
        ret = dxtory_decode_v1_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000029:
    case 0x02000009:
        ret = dxtory_decode_v2_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000021:
    case 0x03000001:
        ret = dxtory_decode_v1_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000029:
    case 0x03000009:
        ret = dxtory_decode_v2_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000021:
    case 0x04000001:
        ret = dxtory_decode_v1_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000029:
    case 0x04000009:
        ret = dxtory_decode_v2_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x17000021:
    case 0x17000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB565LE, 2, vflipped);
        break;
    case 0x17000029:
    case 0x17000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 1, vflipped);
        break;
    case 0x18000021:
    case 0x19000021:
    case 0x18000001:
    case 0x19000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB555LE, 2, vflipped);
        break;
    case 0x18000029:
    case 0x19000029:
    case 0x18000009:
    case 0x19000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 0, vflipped);
        break;
    default:
        avpriv_request_sample(avctx, "Frame header %"PRIX32, type);
        return AVERROR_PATCHWELCOME;
    }

    if (ret)
        return ret;

    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}

AVCodec ff_dxtory_decoder = {
    .name           = "dxtory",
    .long_name      = NULL_IF_CONFIG_SMALL("Dxtory"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXTORY,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};