asvenc.c
/*
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * ASUS V1/V2 encoder.
 */

#include "libavutil/attributes.h"
#include "libavutil/mem.h"

#include "aandcttab.h"
#include "asv.h"
#include "avcodec.h"
#include "dct.h"
#include "fdctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg12data.h"

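/* Write an n-bit value with its bit order reversed; ASV2 codes are stored
 * LSB first (the finished packet is additionally bit-reversed per byte at
 * the end of encode_frame()). */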
static inline void asv2_put_bits(PutBitContext *pb, int n, int v)
{
    put_bits(pb, n, ff_reverse[v << (8 - n)]);
}

static inline void asv1_put_level(PutBitContext *pb, int level)
{
    unsigned int index = level + 3;

    if (index <= 6) {
        put_bits(pb, ff_asv_level_tab[index][1], ff_asv_level_tab[index][0]);
    } else {
        put_bits(pb, ff_asv_level_tab[3][1], ff_asv_level_tab[3][0]);
        put_sbits(pb, 8, level);
    }
}

static inline void asv2_put_level(ASV1Context *a, PutBitContext *pb, int level)
{
    unsigned int index = level + 31;

    if (index <= 62) {
        put_bits(pb, ff_asv2_level_tab[index][1], ff_asv2_level_tab[index][0]);
    } else {
        put_bits(pb, ff_asv2_level_tab[31][1], ff_asv2_level_tab[31][0]);
        if (level < -128 || level > 127) {
            av_log(a->avctx, AV_LOG_WARNING, "Clipping level %d, increase qscale\n", level);
            level = av_clip_int8(level);
        }
        asv2_put_bits(pb, 8, level & 0xFF);
    }
}

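/* Code one 8x8 block in ASV1 syntax: the 8-bit DC value first, then the ten
 * lowest 2x2 coefficient groups along the scan order. Empty groups are
 * counted in nc_count and flushed as zero-CCP codes once a coded group
 * follows; a final CCP code (ff_asv_ccp_tab[16]) terminates the block. */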
static inline void asv1_encode_block(ASV1Context *a, int16_t block[64])
{
    int i;
    int nc_count = 0;

    put_bits(&a->pb, 8, (block[0] + 32) >> 6);
    block[0] = 0;

    for (i = 0; i < 10; i++) {
        const int index = ff_asv_scantab[4 * i];
        int ccp = 0;

        if ((block[index + 0] = (block[index + 0] *
                                 a->q_intra_matrix[index + 0] + (1 << 15)) >> 16))
            ccp |= 8;
        if ((block[index + 8] = (block[index + 8] *
                                 a->q_intra_matrix[index + 8] + (1 << 15)) >> 16))
            ccp |= 4;
        if ((block[index + 1] = (block[index + 1] *
                                 a->q_intra_matrix[index + 1] + (1 << 15)) >> 16))
            ccp |= 2;
        if ((block[index + 9] = (block[index + 9] *
                                 a->q_intra_matrix[index + 9] + (1 << 15)) >> 16))
            ccp |= 1;

        if (ccp) {
            for (; nc_count; nc_count--)
                put_bits(&a->pb, ff_asv_ccp_tab[0][1], ff_asv_ccp_tab[0][0]);

            put_bits(&a->pb, ff_asv_ccp_tab[ccp][1], ff_asv_ccp_tab[ccp][0]);

            if (ccp & 8)
                asv1_put_level(&a->pb, block[index + 0]);
            if (ccp & 4)
                asv1_put_level(&a->pb, block[index + 8]);
            if (ccp & 2)
                asv1_put_level(&a->pb, block[index + 1]);
            if (ccp & 1)
                asv1_put_level(&a->pb, block[index + 9]);
        } else {
            nc_count++;
        }
    }
    put_bits(&a->pb, ff_asv_ccp_tab[16][1], ff_asv_ccp_tab[16][0]);
}

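/* Code one 8x8 block in ASV2 syntax: a backwards scan finds the last
 * non-zero quantized coefficient and its group index is written as a 4-bit
 * count, followed by the 8-bit DC; only that many 2x2 groups are then
 * coded, using the DC CCP table for the first group and the AC CCP table
 * for the rest. */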
static inline void asv2_encode_block(ASV1Context *a, int16_t block[64])
{
    int i;
    int count = 0;

    for (count = 63; count > 3; count--) {
        const int index = ff_asv_scantab[count];
        if ((block[index] * a->q_intra_matrix[index] + (1 << 15)) >> 16)
            break;
    }

    count >>= 2;

    asv2_put_bits(&a->pb, 4, count);
    asv2_put_bits(&a->pb, 8, (block[0] + 32) >> 6);
    block[0] = 0;

    for (i = 0; i <= count; i++) {
        const int index = ff_asv_scantab[4 * i];
        int ccp = 0;

        if ((block[index + 0] = (block[index + 0] *
                                 a->q_intra_matrix[index + 0] + (1 << 15)) >> 16))
            ccp |= 8;
        if ((block[index + 8] = (block[index + 8] *
                                 a->q_intra_matrix[index + 8] + (1 << 15)) >> 16))
            ccp |= 4;
        if ((block[index + 1] = (block[index + 1] *
                                 a->q_intra_matrix[index + 1] + (1 << 15)) >> 16))
            ccp |= 2;
        if ((block[index + 9] = (block[index + 9] *
                                 a->q_intra_matrix[index + 9] + (1 << 15)) >> 16))
            ccp |= 1;

        av_assert2(i || ccp < 8);
        if (i)
            put_bits(&a->pb, ff_asv_ac_ccp_tab[ccp][1], ff_asv_ac_ccp_tab[ccp][0]);
        else
            put_bits(&a->pb, ff_asv_dc_ccp_tab[ccp][1], ff_asv_dc_ccp_tab[ccp][0]);

        if (ccp) {
            if (ccp & 8)
                asv2_put_level(a, &a->pb, block[index + 0]);
            if (ccp & 4)
                asv2_put_level(a, &a->pb, block[index + 8]);
            if (ccp & 2)
                asv2_put_level(a, &a->pb, block[index + 1]);
            if (ccp & 1)
                asv2_put_level(a, &a->pb, block[index + 9]);
        }
    }
}

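/* Generous upper bound, in bytes, on the coded size of one 4:2:0 macroblock
 * (16x16 luma plus two 8x8 chroma blocks = 384 coefficients, budgeted at
 * 30 bits each); used below to size the output packet and to assert that
 * room remains before each macroblock is coded. */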
#define MAX_MB_SIZE (30 * 16 * 16 * 3 / 2 / 8)

static inline int encode_mb(ASV1Context *a, int16_t block[6][64])
{
    int i;

    av_assert0(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb) >> 3) >= MAX_MB_SIZE);

    if (a->avctx->codec_id == AV_CODEC_ID_ASV1) {
        for (i = 0; i < 6; i++)
            asv1_encode_block(a, block[i]);
    } else {
        for (i = 0; i < 6; i++) {
            asv2_encode_block(a, block[i]);
        }
    }
    return 0;
}

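/* Fetch the four luma and two chroma 8x8 blocks of macroblock (mb_x, mb_y)
 * from the source frame and apply the forward DCT; chroma is skipped when
 * encoding in grayscale mode (AV_CODEC_FLAG_GRAY). */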
static inline void dct_get(ASV1Context *a, const AVFrame *frame,
                           int mb_x, int mb_y)
{
    int16_t (*block)[64] = a->block;
    int linesize = frame->linesize[0];
    int i;

    uint8_t *ptr_y  = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
    uint8_t *ptr_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
    uint8_t *ptr_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;

    a->pdsp.get_pixels(block[0], ptr_y, linesize);
    a->pdsp.get_pixels(block[1], ptr_y + 8, linesize);
    a->pdsp.get_pixels(block[2], ptr_y + 8 * linesize, linesize);
    a->pdsp.get_pixels(block[3], ptr_y + 8 * linesize + 8, linesize);
    for (i = 0; i < 4; i++)
        a->fdsp.fdct(block[i]);

    if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        a->pdsp.get_pixels(block[4], ptr_cb, frame->linesize[1]);
        a->pdsp.get_pixels(block[5], ptr_cr, frame->linesize[2]);
        for (i = 4; i < 6; i++)
            a->fdsp.fdct(block[i]);
    }
}

static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    ASV1Context *const a = avctx->priv_data;
    int size, ret;
    int mb_x, mb_y;

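    /* If the frame dimensions are not multiples of 16, encode a padded copy
     * instead: the copy is enlarged to the next multiple of 16 and the new
     * right/bottom samples replicate the outermost picture edge. */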
    if (pict->width % 16 || pict->height % 16) {
        AVFrame *clone = av_frame_alloc();
        int i;

        if (!clone)
            return AVERROR(ENOMEM);
        clone->format = pict->format;
        clone->width  = FFALIGN(pict->width, 16);
        clone->height = FFALIGN(pict->height, 16);
        ret = av_frame_get_buffer(clone, 32);
        if (ret < 0) {
            av_frame_free(&clone);
            return ret;
        }

        ret = av_frame_copy(clone, pict);
        if (ret < 0) {
            av_frame_free(&clone);
            return ret;
        }

        for (i = 0; i < 3; i++) {
            int x, y;
            int w  = AV_CEIL_RSHIFT(pict->width, !!i);
            int h  = AV_CEIL_RSHIFT(pict->height, !!i);
            int w2 = AV_CEIL_RSHIFT(clone->width, !!i);
            int h2 = AV_CEIL_RSHIFT(clone->height, !!i);
            for (y = 0; y < h; y++)
                for (x = w; x < w2; x++)
                    clone->data[i][x + y * clone->linesize[i]] =
                        clone->data[i][w - 1 + y * clone->linesize[i]];
            for (y = h; y < h2; y++)
                for (x = 0; x < w2; x++)
                    clone->data[i][x + y * clone->linesize[i]] =
                        clone->data[i][x + (h - 1) * clone->linesize[i]];
        }
        ret = encode_frame(avctx, pkt, clone, got_packet);

        av_frame_free(&clone);
        return ret;
    }

    if ((ret = ff_alloc_packet2(avctx, pkt, a->mb_height * a->mb_width * MAX_MB_SIZE +
                                AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
        return ret;

    init_put_bits(&a->pb, pkt->data, pkt->size);

    for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
        for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
            dct_get(a, pict, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if (a->mb_width2 != a->mb_width) {
        mb_x = a->mb_width2;
        for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
            dct_get(a, pict, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if (a->mb_height2 != a->mb_height) {
        mb_y = a->mb_height2;
        for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
            dct_get(a, pict, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }
    emms_c();

    avpriv_align_put_bits(&a->pb);
    while (put_bits_count(&a->pb) & 31)
        put_bits(&a->pb, 8, 0);

    size = put_bits_count(&a->pb) / 32;

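    /* Finalize the packet: the stream is handled as 32-bit words, which ASV1
     * stores byte-swapped while ASV2 stores every byte bit-reversed. */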
    if (avctx->codec_id == AV_CODEC_ID_ASV1) {
        a->bbdsp.bswap_buf((uint32_t *) pkt->data,
                           (uint32_t *) pkt->data, size);
    } else {
        int i;
        for (i = 0; i < 4 * size; i++)
            pkt->data[i] = ff_reverse[pkt->data[i]];
    }

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    ASV1Context *const a = avctx->priv_data;
    int i;
    const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;

    ff_asv_common_init(avctx);
    ff_fdctdsp_init(&a->fdsp, avctx);
    ff_pixblockdsp_init(&a->pdsp, avctx);

    if (avctx->global_quality <= 0)
        avctx->global_quality = 4 * FF_QUALITY_SCALE;

    a->inv_qscale = (32 * scale * FF_QUALITY_SCALE +
                     avctx->global_quality / 2) / avctx->global_quality;

    avctx->extradata = av_mallocz(8);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    avctx->extradata_size = 8;
    ((uint32_t *) avctx->extradata)[0] = av_le2ne32(a->inv_qscale);
    ((uint32_t *) avctx->extradata)[1] = av_le2ne32(AV_RL32("ASUS"));

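    /* Precompute fixed-point quantization factors so that each coefficient
     * can be quantized as (coef * q_intra_matrix[i] + (1 << 15)) >> 16 in the
     * block coders above; when the AAN fast DCT is selected, its ff_aanscales
     * post-scaling factors are folded into the table. */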
    for (i = 0; i < 64; i++) {
        if (a->fdsp.fdct == ff_fdct_ifast) {
            int q = 32LL * scale * ff_mpeg1_default_intra_matrix[i] * ff_aanscales[i];
            a->q_intra_matrix[i] = (((int64_t)a->inv_qscale << 30) + q / 2) / q;
        } else {
            int q = 32 * scale * ff_mpeg1_default_intra_matrix[i];
            a->q_intra_matrix[i] = ((a->inv_qscale << 16) + q / 2) / q;
        }
    }

    return 0;
}

#if CONFIG_ASV1_ENCODER
AVCodec ff_asv1_encoder = {
    .name           = "asv1",
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV1,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                     AV_PIX_FMT_NONE },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif

#if CONFIG_ASV2_ENCODER
AVCodec ff_asv2_encoder = {
    .name           = "asv2",
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV2,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                     AV_PIX_FMT_NONE },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif
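For orientation, the following is a minimal sketch of driving this encoder through the public libavcodec API (the send/receive interface of FFmpeg 4.x). The helper name encode_one_frame, the choice of quality, and the frame-filling step are placeholders, not part of this file; error paths are collapsed to keep the sketch short.

```c
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Hypothetical helper: encode one YUV420P frame of size w x h with ASV1. */
static int encode_one_frame(int w, int h)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_ASV1); /* or AV_CODEC_ID_ASV2 */
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVFrame *frame       = av_frame_alloc();
    AVPacket *pkt        = av_packet_alloc();
    int ret;

    if (!codec || !ctx || !frame || !pkt)
        return AVERROR(ENOMEM);

    ctx->width          = w;                    /* need not be a multiple of 16;        */
    ctx->height         = h;                    /* encode_frame() pads internally       */
    ctx->pix_fmt        = AV_PIX_FMT_YUV420P;   /* the only supported pixel format      */
    ctx->time_base      = (AVRational){ 1, 25 };
    ctx->global_quality = 4 * FF_QUALITY_SCALE; /* feeds inv_qscale in encode_init()    */

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        return ret;

    frame->format = ctx->pix_fmt;
    frame->width  = w;
    frame->height = h;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* ... fill frame->data[0..2] with picture content here ... */

    if ((ret = avcodec_send_frame(ctx, frame)) < 0)
        return ret;
    ret = avcodec_receive_packet(ctx, pkt);     /* pkt now holds one keyframe */

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return ret;
}
```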