FFmpeg
proresenc_anatoliy.c
Go to the documentation of this file.
1 /*
2  * Apple ProRes encoder
3  *
4  * Copyright (c) 2011 Anatoliy Wasserman
5  * Copyright (c) 2012 Konstantin Shishkov
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * Apple ProRes encoder (Anatoliy Wasserman version)
 * Known FOURCCs: 'ap4h' (444), 'apch' (HQ), 'apcn' (422), 'apcs' (LT), 'apco' (Proxy)
28  */
29 
30 #include "libavutil/opt.h"
31 #include "avcodec.h"
32 #include "dct.h"
33 #include "internal.h"
34 #include "profiles.h"
35 #include "proresdata.h"
36 #include "put_bits.h"
37 #include "bytestream.h"
38 #include "fdctdsp.h"
39 
40 #define DEFAULT_SLICE_MB_WIDTH 8
41 
/* Maps FF_PROFILE_PRORES_* (used directly as an index via avctx->profile)
 * to the FOURCC string written as the codec tag in encode_init.
 * NOTE(review): the doc extraction dropped one line before the closing
 * brace (upstream terminates the list with { FF_PROFILE_UNKNOWN }) --
 * confirm against the original file. */
static const AVProfile profiles[] = {
    { FF_PROFILE_PRORES_PROXY, "apco"},
    { FF_PROFILE_PRORES_LT, "apcs"},
    { FF_PROFILE_PRORES_STANDARD, "apcn"},
    { FF_PROFILE_PRORES_HQ, "apch"},
    { FF_PROFILE_PRORES_4444, "ap4h"},
    { FF_PROFILE_PRORES_XQ, "ap4x"},
};
51 
/* Per-profile rate-control constants, indexed by avctx->profile
 * (0 = Proxy ... 5 = XQ):
 *  - qp_start_table: lowest quantiser (and the starting qp per row of slices),
 *  - qp_end_table:   highest quantiser the slice loop may climb to,
 *  - bitrate_table:  per-macroblock bit budget used to derive the
 *                    low/high byte targets in encode_slice. */
static const int qp_start_table[6] = { 8, 3, 2, 1, 1, 1};
static const int qp_end_table[6] = { 13, 9, 6, 6, 5, 4};
static const int bitrate_table[6] = { 1000, 2100, 3500, 5400, 7000, 10000};
55 
60  AVCOL_SPC_BT2020_NCL, INT_MAX };
61 
/* Per-profile luma quantisation matrices in natural (row-major) 8x8 order,
 * indexed by avctx->profile. Lossier profiles use 63 to discard high
 * frequencies entirely; the 444/XQ matrices are nearly flat. Each matrix
 * is pre-scaled by qp 1..16 into ctx->qmat_luma at init time. */
static const uint8_t QMAT_LUMA[6][64] = {
    {
        4, 7, 9, 11, 13, 14, 15, 63,
        7, 7, 11, 12, 14, 15, 63, 63,
        9, 11, 13, 14, 15, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
        4, 5, 6, 7, 9, 11, 13, 15,
        5, 5, 7, 8, 11, 13, 15, 17,
        6, 7, 9, 11, 13, 15, 15, 17,
        7, 7, 9, 11, 13, 15, 17, 19,
        7, 9, 11, 13, 14, 16, 19, 23,
        9, 11, 13, 14, 16, 19, 23, 29,
        9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
        4, 4, 5, 5, 6, 7, 7, 9,
        4, 4, 5, 6, 7, 7, 9, 9,
        5, 5, 6, 7, 7, 9, 9, 10,
        5, 5, 6, 7, 7, 9, 9, 10,
        5, 6, 7, 7, 8, 9, 10, 12,
        6, 7, 7, 8, 9, 10, 12, 15,
        6, 7, 7, 9, 10, 11, 14, 17,
        7, 7, 9, 10, 11, 14, 17, 21
    }, {
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 XQ */
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 3,
        2, 2, 2, 2, 2, 2, 3, 3,
        2, 2, 2, 2, 2, 3, 3, 3,
        2, 2, 2, 2, 3, 3, 3, 4,
        2, 2, 2, 2, 3, 3, 4, 4,
    }
};
119 
/* Per-profile chroma quantisation matrices, same layout and indexing as
 * QMAT_LUMA. Only the Proxy matrix differs from the luma set (it cuts
 * high frequencies slightly harder); pre-scaled into ctx->qmat_chroma. */
static const uint8_t QMAT_CHROMA[6][64] = {
    {
        4, 7, 9, 11, 13, 14, 63, 63,
        7, 7, 11, 12, 14, 63, 63, 63,
        9, 11, 13, 14, 63, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
        4, 5, 6, 7, 9, 11, 13, 15,
        5, 5, 7, 8, 11, 13, 15, 17,
        6, 7, 9, 11, 13, 15, 15, 17,
        7, 7, 9, 11, 13, 15, 17, 19,
        7, 9, 11, 13, 14, 16, 19, 23,
        9, 11, 13, 14, 16, 19, 23, 29,
        9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
        4, 4, 5, 5, 6, 7, 7, 9,
        4, 4, 5, 6, 7, 7, 9, 9,
        5, 5, 6, 7, 7, 9, 9, 10,
        5, 5, 6, 7, 7, 9, 9, 10,
        5, 6, 7, 7, 8, 9, 10, 12,
        6, 7, 7, 8, 9, 10, 12, 15,
        6, 7, 7, 9, 10, 11, 14, 17,
        7, 7, 9, 10, 11, 14, 17, 21
    }, {
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 xq */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }
};
177 
178 
179 typedef struct {
180  AVClass *class;
186 
187  int qmat_luma[16][64];
188  int qmat_chroma[16][64];
190 
191  int is_422;
194 
195  char *vendor;
196 } ProresContext;
197 
198 static void encode_codeword(PutBitContext *pb, int val, int codebook)
199 {
200  unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros;
201 
202  /* number of bits to switch between rice and exp golomb */
203  switch_bits = codebook & 3;
204  rice_order = codebook >> 5;
205  exp_order = (codebook >> 2) & 7;
206 
207  first_exp = ((switch_bits + 1) << rice_order);
208 
209  if (val >= first_exp) { /* exp golomb */
210  val -= first_exp;
211  val += (1 << exp_order);
212  exp = av_log2(val);
213  zeros = exp - exp_order + switch_bits + 1;
214  put_bits(pb, zeros, 0);
215  put_bits(pb, exp + 1, val);
216  } else if (rice_order) {
217  put_bits(pb, (val >> rice_order), 0);
218  put_bits(pb, 1, 1);
219  put_sbits(pb, rice_order, val);
220  } else {
221  put_bits(pb, val, 0);
222  put_bits(pb, 1, 1);
223  }
224 }
225 
/* Quantise a coefficient by the matrix entry at index ind. */
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
/* Map a signed value to the unsigned Golomb domain (zig-zag: +/- interleaved). */
#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
/* 0 if val has the same sign as `sign`, 1 if it differs (branchless). */
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
/* 1 for negative values, 0 otherwise (branchless). */
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
/* Golomb mapping with an explicitly supplied sign bit; 0 stays 0. */
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
231 
233 {
234  int sign = (val >> 31);
235  return (val ^ sign) - sign;
236 }
237 
/* Codebook used for the very first DC coefficient of each slice. */
#define FIRST_DC_CB 0xB8

/* Adaptive DC-delta codebooks, selected by the previously emitted DC
 * code clamped to 6 (see encode_dc_coeffs). */
static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
241 
242 static void encode_dc_coeffs(PutBitContext *pb, int16_t *in,
243  int blocks_per_slice, int *qmat)
244 {
245  int prev_dc, code;
246  int i, sign, idx;
247  int new_dc, delta, diff_sign, new_code;
248 
249  prev_dc = QSCALE(qmat, 0, in[0] - 16384);
250  code = TO_GOLOMB(prev_dc);
251  encode_codeword(pb, code, FIRST_DC_CB);
252 
253  code = 5; sign = 0; idx = 64;
254  for (i = 1; i < blocks_per_slice; i++, idx += 64) {
255  new_dc = QSCALE(qmat, 0, in[idx] - 16384);
256  delta = new_dc - prev_dc;
257  diff_sign = DIFF_SIGN(delta, sign);
258  new_code = TO_GOLOMB2(get_level(delta), diff_sign);
259 
260  encode_codeword(pb, new_code, dc_codebook[FFMIN(code, 6)]);
261 
262  code = new_code;
263  sign = delta >> 31;
264  prev_dc = new_dc;
265  }
266 }
267 
/* AC run-length codebooks indexed by the previous run (clamped to 15). */
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
        0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
/* AC level codebooks indexed by the previous level (clamped to 9). */
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
        0x28, 0x28, 0x28, 0x4C };
272 
        int16_t *in, int blocks_per_slice, int *qmat, const uint8_t ff_prores_scan[64])
{
    /* Write the AC coefficients of a slice as (run, level, sign) triplets.
     * Coefficients are visited frequency-first across all blocks (outer
     * loop over scan position 1..63, inner loop over blocks), so runs of
     * zeros span block boundaries. Run and level codebooks adapt on the
     * previously coded run/level.
     * NOTE(review): the opening signature line of this function was lost
     * during extraction (it is the declaration of encode_ac_coeffs). */
    int prev_run = 4;
    int prev_level = 2;

    int run = 0, level, code, i, j;
    for (i = 1; i < 64; i++) {
        int indp = ff_prores_scan[i];
        for (j = 0; j < blocks_per_slice; j++) {
            /* quantised coefficient at scan position i of block j */
            int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
            if (val) {
                encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);

                prev_run = run;
                run = 0;
                level = get_level(val);
                code = level - 1;

                encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);

                prev_level = level;

                /* sign is sent as a separate raw bit */
                put_bits(pb, 1, IS_NEGATIVE(val));
            } else {
                ++run;
            }
        }
    }
}
303 
/* Copy one 8x8 block of 16-bit samples from `pixels` (byte stride
 * `stride`) into the contiguous `block`. Each row of 8 samples is moved
 * as two 64-bit loads/stores (4 samples each).
 * Fix: the copy of the first half of each row was missing, so
 * coefficients 0-3 of every line were left unwritten. */
static void get(uint8_t *pixels, int stride, int16_t* block)
{
    int i;

    for (i = 0; i < 8; i++) {
        AV_WN64(block, AV_RN64(pixels));
        AV_WN64(block+4, AV_RN64(pixels+8));
        pixels += stride;
        block += 8;
    }
}
315 
/* Load an 8x8 block of samples from `pixels` and run the forward DCT
 * on it in place, leaving coefficients in `block`. */
static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t* block)
{
    get(pixels, stride, block);
    fdsp->fdct(block);
}
321 
322 static void calc_plane_dct(FDCTDSPContext *fdsp, uint8_t *src, int16_t * blocks, int src_stride, int mb_count, int chroma, int is_422)
323 {
324  int16_t *block;
325  int i;
326 
327  block = blocks;
328 
329  if (!chroma) { /* Luma plane */
330  for (i = 0; i < mb_count; i++) {
331  fdct_get(fdsp, src, src_stride, block + (0 << 6));
332  fdct_get(fdsp, src + 16, src_stride, block + (1 << 6));
333  fdct_get(fdsp, src + 8 * src_stride, src_stride, block + (2 << 6));
334  fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));
335 
336  block += 256;
337  src += 32;
338  }
339  } else if (chroma && is_422){ /* chroma plane 422 */
340  for (i = 0; i < mb_count; i++) {
341  fdct_get(fdsp, src, src_stride, block + (0 << 6));
342  fdct_get(fdsp, src + 8 * src_stride, src_stride, block + (1 << 6));
343  block += (256 >> 1);
344  src += (32 >> 1);
345  }
346  } else { /* chroma plane 444 */
347  for (i = 0; i < mb_count; i++) {
348  fdct_get(fdsp, src, src_stride, block + (0 << 6));
349  fdct_get(fdsp, src + 8 * src_stride, src_stride, block + (1 << 6));
350  fdct_get(fdsp, src + 16, src_stride, block + (2 << 6));
351  fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));
352 
353  block += 256;
354  src += 32;
355  }
356  }
357 }
358 
359 static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma,
360  const uint8_t ff_prores_scan[64])
361 {
362  int blocks_per_slice;
363  PutBitContext pb;
364 
365  blocks_per_slice = mb_count << (2 - sub_sample_chroma);
366  init_put_bits(&pb, buf, buf_size);
367 
368  encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
369  encode_ac_coeffs(&pb, blocks, blocks_per_slice, qmat, ff_prores_scan);
370 
371  flush_put_bits(&pb);
372  return put_bits_ptr(&pb) - pb.buf;
373 }
374 
        int16_t * blocks_y, int16_t * blocks_u, int16_t * blocks_v,
        unsigned mb_count, uint8_t *buf, unsigned data_size,
        unsigned* y_data_size, unsigned* u_data_size, unsigned* v_data_size,
        int qp)
{
    /* Encode the Y, U and V planes of one slice back to back into `buf`
     * at quantiser `qp`, reporting each plane's byte size through the
     * out-parameters, and return the total. Chroma is skipped entirely
     * in gray mode (sizes stay 0 as initialised by the caller).
     * NOTE(review): the opening signature line of this function was lost
     * during extraction (it is the declaration of encode_slice_data). */
    ProresContext* ctx = avctx->priv_data;

    *y_data_size = encode_slice_plane(blocks_y, mb_count,
            buf, data_size, ctx->qmat_luma[qp - 1], 0, ctx->scantable);

    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
        *u_data_size = encode_slice_plane(blocks_u, mb_count, buf + *y_data_size, data_size - *y_data_size,
                ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);

        *v_data_size = encode_slice_plane(blocks_v, mb_count, buf + *y_data_size + *u_data_size,
                data_size - *y_data_size - *u_data_size,
                ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
    }

    return *y_data_size + *u_data_size + *v_data_size;
}
397 
398 static void put_alpha_diff(PutBitContext *pb, int cur, int prev)
399 {
400  const int abits = 16;
401  const int dbits = 7;
402  const int dsize = 1 << dbits - 1;
403  int diff = cur - prev;
404 
405  diff = av_mod_uintp2(diff, abits);
406  if (diff >= (1 << abits) - dsize)
407  diff -= 1 << abits;
408  if (diff < -dsize || diff > dsize || !diff) {
409  put_bits(pb, 1, 1);
410  put_bits(pb, abits, diff);
411  } else {
412  put_bits(pb, 1, 0);
413  put_bits(pb, dbits - 1, FFABS(diff) - 1);
414  put_bits(pb, 1, diff < 0);
415  }
416 }
417 
418 static inline void put_alpha_run(PutBitContext *pb, int run)
419 {
420  if (run) {
421  put_bits(pb, 1, 0);
422  if (run < 0x10)
423  put_bits(pb, 4, run);
424  else
425  put_bits(pb, 15, run);
426  } else {
427  put_bits(pb, 1, 1);
428  }
429 }
430 
431 static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t * src_a,
432  unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned* a_data_size)
433 {
434  const int abits = 16;
435  const int mask = (1 << abits) - 1;
436  const int num_coeffs = mb_count * 256;
437  int prev = mask, cur;
438  int idx = 0;
439  int run = 0;
440  int16_t * blocks = (int16_t *)src_a;
441  PutBitContext pb;
442  init_put_bits(&pb, buf, data_size);
443 
444  cur = blocks[idx++];
445  put_alpha_diff(&pb, cur, prev);
446  prev = cur;
447  do {
448  cur = blocks[idx++];
449  if (cur != prev) {
450  put_alpha_run (&pb, run);
451  put_alpha_diff(&pb, cur, prev);
452  prev = cur;
453  run = 0;
454  } else {
455  run++;
456  }
457  } while (idx < num_coeffs);
458  if (run)
459  put_alpha_run(&pb, run);
460  flush_put_bits(&pb);
461  *a_data_size = put_bits_count(&pb) >> 3;
462 
463  if (put_bits_left(&pb) < 0) {
464  av_log(avctx, AV_LOG_ERROR,
465  "Underestimated required buffer size.\n");
466  return AVERROR_BUG;
467  } else {
468  return 0;
469  }
470 }
471 
/* Copy a dst_width x dst_height window starting at (x, y) of a 16-bit
 * plane into the contiguous buffer `dst`, replicating the last column /
 * last row when the window extends past the picture edge (the "unsafe"
 * slice case). For interlaced input only every other source line is
 * read (bottom field starts one line down). Alpha samples are widened
 * from 10 to 16 bits on the way through. `stride` is in bytes. */
static inline void subimage_with_fill_template(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_alpha_plane,
        int is_interlaced, int is_top_field)
{
    int box_width = FFMIN(width - x, dst_width);
    int i, j, src_stride, box_height;
    uint16_t last_pix, *last_line;

    if (!is_interlaced) {
        src_stride = stride >> 1; /* bytes -> uint16_t elements */
        src += y * src_stride + x;
        box_height = FFMIN(height - y, dst_height);
    } else {
        src_stride = stride; /* 2 lines stride */
        src += y * src_stride + x;
        box_height = FFMIN(height/2 - y, dst_height);
        if (!is_top_field)
            src += stride >> 1;
    }

    for (i = 0; i < box_height; ++i) {
        for (j = 0; j < box_width; ++j) {
            if (!is_alpha_plane) {
                dst[j] = src[j];
            } else {
                dst[j] = src[j] << 6; /* alpha 10b to 16b */
            }
        }
        /* j is box_width here; replicate the last written pixel rightwards.
         * NOTE(review): dst[j - 1] was already shifted by 6 in the alpha
         * branch above, so shifting it again looks like a double shift --
         * confirm against upstream whether src[j - 1] was intended. */
        if (!is_alpha_plane) {
            last_pix = dst[j - 1];
        } else {
            last_pix = dst[j - 1] << 6; /* alpha 10b to 16b */
        }
        for (; j < dst_width; j++)
            dst[j] = last_pix;
        src += src_stride;
        dst += dst_width;
    }
    /* replicate the last produced row downwards to fill dst_height */
    last_line = dst - dst_width;
    for (; i < dst_height; i++) {
        for (j = 0; j < dst_width; ++j) {
            dst[j] = last_line[j];
        }
        dst += dst_width;
    }
}
519 
/* Extract an edge-padded subimage of a luma/chroma plane (no 10b->16b
 * expansion); thin wrapper over the shared template. */
static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height,
                                dst, dst_width, dst_height,
                                0 /* not alpha */, is_interlaced, is_top_field);
}
526 
/* Extract an edge-padded subimage of the alpha plane, expanding each
 * sample from 10 to 16 bits; thin wrapper over the shared template. */
static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height,
                                dst, dst_width, dst_height,
                                1 /* alpha */, is_interlaced, is_top_field);
}
534 
/* Encode one slice (mb_count macroblocks starting at mb_x, mb_y) into
 * `buf`: slice header, Y/U/V payloads, then the optional alpha payload.
 * For "unsafe" slices (touching a non-MB-aligned picture edge) the data
 * is first copied into edge-padded scratch buffers. For safe slices a
 * rate-control loop walks *qp up or down between the profile's limits
 * until the slice lands in the target byte window; qp is carried across
 * slices by the caller. Returns the slice size in bytes or a negative
 * error code. */
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
        int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
        int unsafe, int *qp, int is_interlaced, int is_top_field)
{
    int luma_stride, chroma_stride, alpha_stride = 0;
    ProresContext* ctx = avctx->priv_data;
    int hdr_size = 6 + (ctx->need_alpha * 2); /* v data size is write when there is alpha */
    int ret = 0, slice_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0, a_data_size = 0;
    FDCTDSPContext *fdsp = &ctx->fdsp;
    /* per-slice bit budget from the profile table, and the accepted window */
    int tgt_bits = (mb_count * bitrate_table[avctx->profile]) >> 2;
    int low_bytes = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
    int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;

    LOCAL_ALIGNED(16, int16_t, blocks_y, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_u, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_v, [DEFAULT_SLICE_MB_WIDTH << 8]);

    luma_stride = pic->linesize[0];
    chroma_stride = pic->linesize[1];

    if (ctx->need_alpha)
        alpha_stride = pic->linesize[3];

    /* locate the slice's top-left sample in each plane */
    if (!is_interlaced) {
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
    } else {
        /* interlaced: a field line is every second frame line */
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride * 2 + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        if (!is_top_field){ /* bottom field, offset dest */
            dest_y += luma_stride;
            dest_u += chroma_stride;
            dest_v += chroma_stride;
        }
    }

    if (unsafe) {
        /* edge slice: work from edge-replicated copies of each plane */
        subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
                luma_stride, avctx->width, avctx->height,
                (uint16_t *) ctx->fill_y, mb_count << 4, 16, is_interlaced, is_top_field);
        subimage_with_fill((uint16_t *) pic->data[1], mb_x << (4 - ctx->is_422), mb_y << 4,
                chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                (uint16_t *) ctx->fill_u, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);
        subimage_with_fill((uint16_t *) pic->data[2], mb_x << (4 - ctx->is_422), mb_y << 4,
                chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                (uint16_t *) ctx->fill_v, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);

        /* no need for interlaced special case, data already reorganized in subimage_with_fill */
        calc_plane_dct(fdsp, ctx->fill_y, blocks_y, mb_count << 5, mb_count, 0, 0);
        calc_plane_dct(fdsp, ctx->fill_u, blocks_u, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);
        calc_plane_dct(fdsp, ctx->fill_v, blocks_v, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                mb_count, buf + hdr_size, data_size - hdr_size,
                &y_data_size, &u_data_size, &v_data_size,
                *qp);
    } else {
        if (!is_interlaced) {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride, mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride, mb_count, 1, ctx->is_422);
        } else {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride * 2, mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride * 2, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride * 2, mb_count, 1, ctx->is_422);
        }

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                mb_count, buf + hdr_size, data_size - hdr_size,
                &y_data_size, &u_data_size, &v_data_size,
                *qp);

        /* rate control: re-encode at coarser/finer qp until the size fits */
        if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
            do {
                *qp += 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                        mb_count, buf + hdr_size, data_size - hdr_size,
                        &y_data_size, &u_data_size, &v_data_size,
                        *qp);
            } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
        } else if (slice_size < low_bytes && *qp
                > qp_start_table[avctx->profile]) {
            do {
                *qp -= 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                        mb_count, buf + hdr_size, data_size - hdr_size,
                        &y_data_size, &u_data_size, &v_data_size,
                        *qp);
            } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
        }
    }

    /* slice header: header size (in 8-byte units, stored << 3), qp,
     * then the big-endian Y and U payload sizes */
    buf[0] = hdr_size << 3;
    buf[1] = *qp;
    AV_WB16(buf + 2, y_data_size);
    AV_WB16(buf + 4, u_data_size);

    if (ctx->need_alpha) {
        AV_WB16(buf + 6, v_data_size); /* write v data size only if there is alpha */

        subimage_alpha_with_fill((uint16_t *) pic->data[3], mb_x << 4, mb_y << 4,
                alpha_stride, avctx->width, avctx->height,
                (uint16_t *) ctx->fill_a, mb_count << 4, 16, is_interlaced, is_top_field);
        ret = encode_alpha_slice_data(avctx, ctx->fill_a, mb_count,
                buf + hdr_size + slice_size,
                data_size - hdr_size - slice_size, &a_data_size);
    }

    if (ret != 0) {
        return ret;
    }
    return hdr_size + y_data_size + u_data_size + v_data_size + a_data_size;
}
652 
/* Encode one picture (a full progressive frame, or one field when
 * interlaced) into `buf`: an 8-byte picture header, a table of 16-bit
 * slice sizes, then the slice payloads. Slices are DEFAULT_SLICE_MB_WIDTH
 * macroblocks wide, halving at the right edge. Returns the number of
 * bytes written or a negative error code. */
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
        uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
{
    ProresContext *ctx = avctx->priv_data;
    int mb_width = (avctx->width + 15) >> 4;
    int hdr_size, sl_size, i;
    int mb_y, sl_data_size, qp, mb_height, picture_height, unsafe_mb_height_limit;
    int unsafe_bot, unsafe_right;
    uint8_t *sl_data, *sl_data_sizes;
    int slice_per_line = 0, rem = mb_width;

    if (!ctx->is_interlaced) { /* progressive encoding */
        mb_height = (avctx->height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    } else {
        /* odd heights give the extra line to the top field */
        if (is_top_field) {
            picture_height = (avctx->height + 1) / 2;
        } else {
            picture_height = avctx->height / 2;
        }
        mb_height = (picture_height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    }

    /* count slices per MB row: full-width slices plus the halving tail */
    for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
        slice_per_line += rem >> i;
        rem &= (1 << i) - 1;
    }

    qp = qp_start_table[avctx->profile];
    hdr_size = 8; sl_data_size = buf_size - hdr_size;
    sl_data_sizes = buf + hdr_size;
    /* slice payloads start after the slice-size table */
    sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        int mb_x = 0;
        int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
        while (mb_x < mb_width) {
            /* shrink the slice when fewer MBs than slice width remain */
            while (mb_width - mb_x < slice_mb_count)
                slice_mb_count >>= 1;

            /* slices touching a non-aligned bottom/right edge need padding */
            unsafe_bot = (avctx->height & 0xf) && (mb_y == unsafe_mb_height_limit - 1);
            unsafe_right = (avctx->width & 0xf) && (mb_x + slice_mb_count == mb_width);

            sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
                    sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp, ctx->is_interlaced, is_top_field);
            if (sl_size < 0){
                return sl_size;
            }

            bytestream_put_be16(&sl_data_sizes, sl_size);
            sl_data += sl_size;
            sl_data_size -= sl_size;
            mb_x += slice_mb_count;
        }
    }

    /* picture header: header size (<< 3), 32-bit picture size, 16-bit
     * slice count, and log2(slice width in MB) in the high nibble */
    buf[0] = hdr_size << 3;
    AV_WB32(buf + 1, sl_data - buf);
    AV_WB16(buf + 5, slice_per_line * mb_height); /* total number of slices */
    buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4; /* log2 slice width in MB */

    return sl_data - buf;
}
716 
        const AVFrame *pict, int *got_packet)
{
    /* Encode one frame: write the 8-byte icpf atom plus the 148-byte
     * frame header, then one picture (progressive) or two field
     * pictures (interlaced), and patch the final frame size back into
     * the atom. Returns 0 on success or a negative error code.
     * NOTE(review): the opening signature line was lost during
     * extraction (it is the declaration of prores_encode_frame, taking
     * avctx and pkt). */
    ProresContext *ctx = avctx->priv_data;
    int header_size = 148;
    uint8_t *buf;
    int compress_frame_size, pic_size, ret, is_top_field_first = 0;
    uint8_t frame_flags;
    int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + AV_INPUT_BUFFER_MIN_SIZE; //FIXME choose tighter limit


    if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
        return ret;

    buf = pkt->data;
    compress_frame_size = 8 + header_size;

    bytestream_put_be32(&buf, compress_frame_size);/* frame size will be update after picture(s) encoding */
    bytestream_put_buffer(&buf, "icpf", 4);

    bytestream_put_be16(&buf, header_size);
    bytestream_put_be16(&buf, 0); /* version */
    bytestream_put_buffer(&buf, ctx->vendor, 4);
    bytestream_put_be16(&buf, avctx->width);
    bytestream_put_be16(&buf, avctx->height);
    frame_flags = 0x82; /* 422 not interlaced */
    if (avctx->profile >= FF_PROFILE_PRORES_4444) /* 4444 or 4444 Xq */
        frame_flags |= 0x40; /* 444 chroma */
    if (ctx->is_interlaced) {
        if (pict->top_field_first || !pict->interlaced_frame) { /* tff frame or progressive frame interpret as tff */
            av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, top field first\n");
            frame_flags |= 0x04; /* interlaced tff */
            is_top_field_first = 1;
        } else {
            av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, bottom field first\n");
            frame_flags |= 0x08; /* interlaced bff */
        }
    } else {
        av_log(avctx, AV_LOG_DEBUG, "use progressive encoding\n");
    }
    *buf++ = frame_flags;
    *buf++ = 0; /* reserved */
    /* only write color properties, if valid value. set to unspecified otherwise */
    *buf++ = ff_int_from_list_or_default(avctx, "frame color primaries", pict->color_primaries, valid_primaries, 0);
    *buf++ = ff_int_from_list_or_default(avctx, "frame color trc", pict->color_trc, valid_trc, 0);
    *buf++ = ff_int_from_list_or_default(avctx, "frame colorspace", pict->colorspace, valid_colorspace, 0);
    if (avctx->profile >= FF_PROFILE_PRORES_4444) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) {
            *buf++ = 0xA0;/* src b64a and no alpha */
        } else {
            *buf++ = 0xA2;/* src b64a and 16b alpha */
        }
    } else {
        *buf++ = 32;/* src v210 and no alpha */
    }
    *buf++ = 0; /* reserved */
    *buf++ = 3; /* luma and chroma matrix present */

    /* embed the unscaled quantisation matrices for this profile */
    bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64);
    bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);

    pic_size = prores_encode_picture(avctx, pict, buf,
            pkt->size - compress_frame_size, 0, is_top_field_first);/* encode progressive or first field */
    if (pic_size < 0) {
        return pic_size;
    }
    compress_frame_size += pic_size;

    if (ctx->is_interlaced) { /* encode second field */
        pic_size = prores_encode_picture(avctx, pict, pkt->data + compress_frame_size,
                pkt->size - compress_frame_size, 1, !is_top_field_first);
        if (pic_size < 0) {
            return pic_size;
        }
        compress_frame_size += pic_size;
    }

    AV_WB32(pkt->data, compress_frame_size);/* update frame size */
    pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->size = compress_frame_size;
    *got_packet = 1;

    return 0;
}
801 
/* Multiply all 64 entries of a quantisation matrix by `scale`,
 * widening from uint8_t to int on the way. */
static void scale_mat(const uint8_t* src, int* dst, int scale)
{
    int idx;

    for (idx = 0; idx < 64; idx++)
        dst[idx] = src[idx] * scale;
}
808 
{
    /* Encoder init: validate dimensions / profile / pixel format
     * combinations, pick 422 vs 444 mode, allocate edge-padding scratch
     * buffers, set the codec tag from the profile FOURCC, and pre-scale
     * the quantisation matrices for qp 1..16.
     * NOTE(review): the opening signature line (prores_encode_init) was
     * lost during extraction. */
    int i;
    ProresContext* ctx = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;
    ctx->need_alpha = 0;
    /* NOTE(review): extraction dropped the statements inside this
     * if/else -- upstream selects the interlaced vs progressive scan
     * table here; confirm against the original file. */
    if (ctx->is_interlaced) {
    } else {
    }

    if (avctx->width & 0x1) {
        av_log(avctx, AV_LOG_ERROR,
                "frame width needs to be multiple of 2\n");
        return AVERROR(EINVAL);
    }

    if (avctx->width > 65534 || avctx->height > 65535) {
        av_log(avctx, AV_LOG_ERROR,
                "The maximum dimensions are 65534x65535\n");
        return AVERROR(EINVAL);
    }

    /* vendor tag is written verbatim into the frame header */
    if (strlen(ctx->vendor) != 4) {
        av_log(avctx, AV_LOG_ERROR, "vendor ID should be 4 bytes\n");
        return AVERROR(EINVAL);
    }

    if (avctx->profile == FF_PROFILE_UNKNOWN) {
        /* NOTE(review): extraction dropped the avctx->profile assignment
         * in each of these branches -- confirm against the original. */
        if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
            av_log(avctx, AV_LOG_INFO,
                    "encoding with ProRes standard (apcn) profile\n");
        } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) {
            av_log(avctx, AV_LOG_INFO,
                    "encoding with ProRes 4444 (ap4h) profile\n");
        } else if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
            av_log(avctx, AV_LOG_INFO,
                    "encoding with ProRes 4444+ (ap4h) profile\n");
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unknown pixel format\n");
            return AVERROR(EINVAL);
        }
    } else if (avctx->profile < FF_PROFILE_PRORES_PROXY
            || avctx->profile > FF_PROFILE_PRORES_XQ) {
        av_log(
                avctx,
                AV_LOG_ERROR,
                "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch, 4 - ap4h, 5 - ap4x]\n",
                avctx->profile);
        return AVERROR(EINVAL);
    } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P10) && (avctx->profile > FF_PROFILE_PRORES_HQ)){
        av_log(avctx, AV_LOG_ERROR,
                "encoding with ProRes 444/Xq (ap4h/ap4x) profile, need YUV444P10 input\n");
        return AVERROR(EINVAL);
    } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10)
            && (avctx->profile < FF_PROFILE_PRORES_4444)){
        av_log(avctx, AV_LOG_ERROR,
                "encoding with ProRes Proxy/LT/422/422 HQ (apco, apcs, apcn, ap4h) profile, need YUV422P10 input\n");
        return AVERROR(EINVAL);
    }

    if (avctx->profile < FF_PROFILE_PRORES_4444) { /* 422 versions */
        ctx->is_422 = 1;
        /* scratch buffers only needed when an edge is not MB aligned */
        if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
            ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
            if (!ctx->fill_y)
                return AVERROR(ENOMEM);
            ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
            ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
        }
    } else { /* 444 */
        ctx->is_422 = 0;
        if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
            ctx->fill_y = av_malloc(3 * (DEFAULT_SLICE_MB_WIDTH << 9));
            if (!ctx->fill_y)
                return AVERROR(ENOMEM);
            ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
            ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 9);
        }
        if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
            ctx->need_alpha = 1;
            ctx->fill_a = av_malloc(DEFAULT_SLICE_MB_WIDTH << 9); /* 8 blocks x 16px x 16px x sizeof (uint16) */
            if (!ctx->fill_a)
                return AVERROR(ENOMEM);
        }
    }

    ff_fdctdsp_init(&ctx->fdsp, avctx);

    /* codec tag is the profile's FOURCC read as little-endian */
    avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);

    /* pre-scale both quantisation matrices for every qp 1..16 */
    for (i = 1; i <= 16; i++) {
        scale_mat(QMAT_LUMA[avctx->profile] , ctx->qmat_luma[i - 1] , i);
        scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
    }

    return 0;
}
913 
{
    /* Free the edge-padding scratch buffers (fill_u/fill_v live inside
     * the fill_y allocation; fill_a is separate). av_freep tolerates
     * NULL, so this is safe even when no scratch was allocated.
     * NOTE(review): the opening signature line (prores_encode_close)
     * was lost during extraction. */
    ProresContext* ctx = avctx->priv_data;
    av_freep(&ctx->fill_y);
    av_freep(&ctx->fill_a);

    return 0;
}
922 
/* Shorthand for AVOption offsets/flags in the table below. */
#define OFFSET(x) offsetof(ProresContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Encoder private options: only the 4-byte vendor ID written into the
 * frame header is configurable (validated in encode_init). */
static const AVOption options[] = {
    { "vendor", "vendor ID", OFFSET(vendor), AV_OPT_TYPE_STRING, { .str = "fmpg" }, CHAR_MIN, CHAR_MAX, VE },
    { NULL }
};
930 
/* AVClass exposing the shared `options` table for the prores_aw encoder. */
static const AVClass proresaw_enc_class = {
    .class_name = "ProResAw encoder",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
937 
/* AVClass exposing the shared `options` table for the prores encoder. */
static const AVClass prores_enc_class = {
    .class_name = "ProRes encoder",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
944 
946  .name = "prores_aw",
947  .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
948  .type = AVMEDIA_TYPE_VIDEO,
949  .id = AV_CODEC_ID_PRORES,
950  .priv_data_size = sizeof(ProresContext),
952  .close = prores_encode_close,
953  .encode2 = prores_encode_frame,
956  .priv_class = &proresaw_enc_class,
958 };
959 
961  .name = "prores",
962  .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
963  .type = AVMEDIA_TYPE_VIDEO,
964  .id = AV_CODEC_ID_PRORES,
965  .priv_data_size = sizeof(ProresContext),
967  .close = prores_encode_close,
968  .encode2 = prores_encode_frame,
971  .priv_class = &prores_enc_class,
973 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:498
#define NULL
Definition: coverity.c:32
const char const char void * val
Definition: avisynth_c.h:863
static const AVClass prores_enc_class
This structure describes decoded (raw) audio or video data.
Definition: frame.h:268
AVOption.
Definition: opt.h:246
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size, int unsafe, int *qp, int is_interlaced, int is_top_field)
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic, uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
uint8_t qmat_luma[64]
Definition: proresdec.h:43
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:896
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:240
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
#define TO_GOLOMB2(val, sign)
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static const int valid_colorspace[5]
static const int qp_end_table[6]
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:458
int size
Definition: avcodec.h:1478
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
static void encode_ac_coeffs(PutBitContext *pb, int16_t *in, int blocks_per_slice, int *qmat, const uint8_t ff_prores_scan[64])
int av_log2(unsigned v)
Definition: intmath.c:26
static av_cold int prores_encode_init(AVCodecContext *avctx)
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:503
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
static const AVOption options[]
#define FF_PROFILE_PRORES_LT
Definition: avcodec.h:3000
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:457
static void calc_plane_dct(FDCTDSPContext *fdsp, uint8_t *src, int16_t *blocks, int src_stride, int mb_count, int chroma, int is_422)
uint8_t run
Definition: svq3.c:206
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2792
#define FIRST_DC_CB
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define src
Definition: vp8dsp.c:254
int profile
profile
Definition: avcodec.h:2894
AVCodec.
Definition: avcodec.h:3477
static const uint8_t QMAT_CHROMA[6][64]
#define FF_PROFILE_PRORES_XQ
Definition: avcodec.h:3004
#define AV_CODEC_CAP_INTRA_ONLY
Codec is intra only.
Definition: avcodec.h:1067
static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t *src_a, unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned *a_data_size)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define VE
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
float delta
AVOptions.
static void encode_dc_coeffs(PutBitContext *pb, int16_t *in, int blocks_per_slice, int *qmat)
#define height
uint8_t * data
Definition: avcodec.h:1477
const uint8_t * scantable
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:415
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:883
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
#define AV_INPUT_BUFFER_MIN_SIZE
minimum encoding buffer size Used to avoid some checks during header writing.
Definition: avcodec.h:797
#define FFALIGN(x, a)
Definition: macros.h:48
const AVProfile ff_prores_profiles[]
Definition: profiles.c:154
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1509
static const int qp_start_table[6]
static const uint8_t lev_to_cb[10]
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:260
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
static int put_bits_left(PutBitContext *s)
Definition: put_bits.h:93
static const uint16_t mask[17]
Definition: lzw.c:38
static av_always_inline void chroma(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset_y, int offset_x, int column, int mirror, int jobnr, int nb_jobs)
Definition: vf_waveform.c:1511
static void put_alpha_run(PutBitContext *pb, int run)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static av_cold int prores_encode_close(AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:523
uint8_t * buf
Definition: put_bits.h:38
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:445
const char * name
Name of the codec implementation.
Definition: avcodec.h:3484
#define IS_NEGATIVE(val)
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1037
int8_t exp
Definition: eval.c:72
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1483
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
static const int valid_primaries[9]
static av_always_inline int get_level(int val)
#define TO_GOLOMB(val)
#define FFMIN(a, b)
Definition: common.h:96
static void scale_mat(const uint8_t *src, int *dst, int scale)
static void put_alpha_diff(PutBitContext *pb, int cur, int prev)
static const AVProfile profiles[]
#define width
int width
picture width / height.
Definition: avcodec.h:1738
#define FF_PROFILE_PRORES_STANDARD
Definition: avcodec.h:3001
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2895
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:507
static const int valid_trc[4]
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:450
AVFormatContext * ctx
Definition: movenc.c:48
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define DEFAULT_SLICE_MB_WIDTH
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:426
AVCodec ff_prores_encoder
#define FF_PROFILE_PRORES_4444
Definition: avcodec.h:3003
also ITU-R BT1361
Definition: pixfmt.h:469
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
const uint8_t ff_prores_interlaced_scan[64]
Definition: proresdata.c:36
int frame_size
Definition: mxfenc.c:2216
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:299
static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma, const uint8_t ff_prores_scan[64])
main external API structure.
Definition: avcodec.h:1565
FDCTDSPContext fdsp
static const uint8_t dc_codebook[7]
const uint8_t ff_prores_progressive_scan[64]
Definition: proresdata.c:25
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1590
void * buf
Definition: avisynth_c.h:766
#define OFFSET(x)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
const char * name
short name for the profile
Definition: avcodec.h:3412
static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx, int16_t *blocks_y, int16_t *blocks_u, int16_t *blocks_v, unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned *y_data_size, unsigned *u_data_size, unsigned *v_data_size, int qp)
#define FF_PROFILE_PRORES_HQ
Definition: avcodec.h:3002
#define QSCALE(qmat, ind, val)
static const AVClass proresaw_enc_class
static void encode_codeword(PutBitContext *pb, int val, int codebook)
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:282
uint8_t level
Definition: svq3.c:207
static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t *block)
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
AVCodec ff_prores_aw_encoder
static const uint8_t QMAT_LUMA[6][64]
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:114
AVProfile.
Definition: avcodec.h:3410
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
Definition: bytestream.h:368
void * priv_data
Definition: avcodec.h:1592
static void subimage_with_fill_template(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_alpha_plane, int is_interlaced, int is_top_field)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:664
uint8_t qmat_chroma[64]
Definition: proresdec.h:44
int pixels
Definition: avisynth_c.h:390
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:420
#define AV_RN64(p)
Definition: intreadwrite.h:368
enum AVColorPrimaries color_primaries
Definition: frame.h:514
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:451
ITU-R BT2020.
Definition: pixfmt.h:454
#define FF_PROFILE_PRORES_PROXY
Definition: avcodec.h:2999
#define av_freep(p)
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:516
#define av_always_inline
Definition: attributes.h:39
int ff_int_from_list_or_default(void *ctx, const char *val_name, int val, const int *array_valid_values, int default_value)
Check if a value is in the list.
Definition: utils.c:2209
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
#define DIFF_SIGN(val, sign)
static const uint8_t run_to_cb[16]
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1454
static const int bitrate_table[6]
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
bitstream writer API