FFmpeg
proresenc_anatoliy.c
Go to the documentation of this file.
1 /*
2  * Apple ProRes encoder
3  *
4  * Copyright (c) 2011 Anatoliy Wasserman
5  * Copyright (c) 2012 Konstantin Shishkov
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * Apple ProRes encoder (Anatoliy Wasserman version)
 * Known FOURCCs: 'ap4h' (444), 'apch' (HQ), 'apcn' (422), 'apcs' (LT), 'apco' (Proxy)
28  */
29 
30 #include "libavutil/opt.h"
31 #include "avcodec.h"
32 #include "dct.h"
33 #include "internal.h"
34 #include "profiles.h"
35 #include "proresdata.h"
36 #include "put_bits.h"
37 #include "bytestream.h"
38 #include "fdctdsp.h"
39 
/* Maximum slice width in macroblocks; slices shrink (by halving) at the
 * right edge of the picture. */
#define DEFAULT_SLICE_MB_WIDTH 8
41 
/* FourCC written as codec_tag, indexed by the FF_PROFILE_PRORES_* value */
static const AVProfile profiles[] = {
    { FF_PROFILE_PRORES_PROXY,    "apco"},
    { FF_PROFILE_PRORES_LT,       "apcs"},
    { FF_PROFILE_PRORES_STANDARD, "apcn"},
    { FF_PROFILE_PRORES_HQ,       "apch"},
    { FF_PROFILE_PRORES_4444,     "ap4h"},
    { FF_PROFILE_PRORES_XQ,       "ap4x"},
};
51 
/* Per-profile quantizer limits used by the slice rate control,
 * indexed by FF_PROFILE_PRORES_* */
static const int qp_start_table[] = { 8, 3, 2, 1, 1, 1};
static const int qp_end_table[] = { 13, 9, 6, 6, 5, 4};
/* Per-profile bitrate scale; encode_slice derives its per-slice byte
 * target from this (see tgt_bits there). */
static const int bitrate_table[] = { 1000, 2100, 3500, 5400, 7000, 10000};
55 
59  AVCOL_TRC_ARIB_STD_B67, INT_MAX };
61  AVCOL_SPC_BT2020_NCL, INT_MAX };
62 
/* Base luma quantization matrices, one 8x8 matrix per profile in the
 * order: proxy, LT, standard, HQ, 4444, 4444 XQ.  Scaled copies for
 * qp 1..16 are precomputed at init time. */
static const uint8_t QMAT_LUMA[6][64] = {
    { /* proxy */
         4,  7,  9, 11, 13, 14, 15, 63,
         7,  7, 11, 12, 14, 15, 63, 63,
         9, 11, 13, 14, 15, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, { /* LT */
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, { /* standard */
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, { /* HQ */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 XQ */
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 3,
        2, 2, 2, 2, 2, 2, 3, 3,
        2, 2, 2, 2, 2, 3, 3, 3,
        2, 2, 2, 2, 3, 3, 3, 4,
        2, 2, 2, 2, 3, 3, 4, 4,
    }
};
120 
/* Base chroma quantization matrices, same profile order as QMAT_LUMA.
 * Only the proxy and XQ profiles differ from their luma counterparts. */
static const uint8_t QMAT_CHROMA[6][64] = {
    { /* proxy */
         4,  7,  9, 11, 13, 14, 63, 63,
         7,  7, 11, 12, 14, 63, 63, 63,
         9, 11, 13, 14, 63, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, { /* LT */
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, { /* standard */
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, { /* HQ */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }, { /* 444 XQ */
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 4,
        4, 4, 4, 4, 4, 4, 4, 5,
        4, 4, 4, 4, 4, 4, 5, 5,
        4, 4, 4, 4, 4, 5, 5, 6,
        4, 4, 4, 4, 5, 5, 6, 7,
        4, 4, 4, 4, 5, 6, 7, 7
    }
};
178 
179 
180 typedef struct {
181  AVClass *class;
187 
188  int qmat_luma[16][64];
189  int qmat_chroma[16][64];
191 
192  int is_422;
195 
196  char *vendor;
197 } ProresContext;
198 
/**
 * Write one non-negative value using the combined Rice/exp-Golomb code
 * described by 'codebook'.
 *
 * The codebook byte packs three fields:
 *   bits 0-1: switch_bits - sets the Rice/exp-Golomb switch point
 *   bits 2-4: exp_order   - order of the exp-Golomb escape code
 *   bits 5-7: rice_order  - order of the Rice code
 * Values below first_exp use the Rice code (plain unary when rice_order
 * is 0); larger values are escaped to exp-Golomb.
 */
static void encode_codeword(PutBitContext *pb, int val, int codebook)
{
    unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros;

    /* number of bits to switch between rice and exp golomb */
    switch_bits = codebook & 3;
    rice_order = codebook >> 5;
    exp_order = (codebook >> 2) & 7;

    first_exp = ((switch_bits + 1) << rice_order);

    if (val >= first_exp) { /* exp golomb */
        val -= first_exp;
        val += (1 << exp_order); /* fold in the implicit leading one */
        exp = av_log2(val);
        zeros = exp - exp_order + switch_bits + 1; /* zero-run prefix length */
        put_bits(pb, zeros, 0);
        put_bits(pb, exp + 1, val);
    } else if (rice_order) {
        put_bits(pb, (val >> rice_order), 0); /* unary quotient */
        put_bits(pb, 1, 1);                   /* stop bit */
        put_sbits(pb, rice_order, val);       /* low-order remainder */
    } else {
        put_bits(pb, val, 0); /* degenerate Rice: pure unary */
        put_bits(pb, 1, 1);
    }
}
226 
/* Quantize a coefficient by the matching quant-matrix entry. */
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
/* Map a signed value to a non-negative code: 0,-1,1,-2,2 -> 0,1,2,3,4. */
#define TO_GOLOMB(val) (((val) * 2) ^ ((val) >> 31))
/* Sign bit of 'val' XORed with a previous sign (branchless). */
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
/* 1 if 'val' is negative, 0 otherwise (relies on arithmetic shift). */
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
/* Golomb-style mapping with an explicitly supplied sign bit. */
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
232 
234 {
235  int sign = (val >> 31);
236  return (val ^ sign) - sign;
237 }
238 
/* Codebook for the first (absolute) DC coefficient of a slice */
#define FIRST_DC_CB 0xB8

/* DC delta codebooks, selected by the previous DC codeword (clamped to 6) */
static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
242 
/**
 * Encode the DC coefficients of all blocks of a slice plane.
 *
 * The first DC is coded absolutely (after removing the 16384 DCT offset
 * of the unsigned input); subsequent DCs are coded as sign-adjusted
 * deltas against the previous block, with the codebook adapting to the
 * previously emitted codeword.
 */
static void encode_dc_coeffs(PutBitContext *pb, int16_t *in,
        int blocks_per_slice, int *qmat)
{
    int prev_dc, code;
    int i, sign, idx;
    int new_dc, delta, diff_sign, new_code;

    /* first block: absolute, quantized, golomb-mapped */
    prev_dc = QSCALE(qmat, 0, in[0] - 16384);
    code = TO_GOLOMB(prev_dc);
    encode_codeword(pb, code, FIRST_DC_CB);

    code = 5; sign = 0; idx = 64; /* idx steps one 8x8 block at a time */
    for (i = 1; i < blocks_per_slice; i++, idx += 64) {
        new_dc = QSCALE(qmat, 0, in[idx] - 16384);
        delta = new_dc - prev_dc;
        diff_sign = DIFF_SIGN(delta, sign); /* sign relative to previous delta */
        new_code = TO_GOLOMB2(get_level(delta), diff_sign);

        encode_codeword(pb, new_code, dc_codebook[FFMIN(code, 6)]);

        /* adapt codebook selection and sign prediction for the next block */
        code = new_code;
        sign = delta >> 31;
        prev_dc = new_dc;
    }
}
268 
/* AC run-length codebooks, selected by the previous run (clamped to 15) */
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
        0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
/* AC level codebooks, selected by the previous level (clamped to 9) */
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
        0x28, 0x28, 0x28, 0x4C };
273 
275  int16_t *in, int blocks_per_slice, int *qmat, const uint8_t ff_prores_scan[64])
276 {
277  int prev_run = 4;
278  int prev_level = 2;
279 
280  int run = 0, level, code, i, j;
281  for (i = 1; i < 64; i++) {
282  int indp = ff_prores_scan[i];
283  for (j = 0; j < blocks_per_slice; j++) {
284  int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
285  if (val) {
286  encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);
287 
288  prev_run = run;
289  run = 0;
290  level = get_level(val);
291  code = level - 1;
292 
293  encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);
294 
295  prev_level = level;
296 
297  put_bits(pb, 1, IS_NEGATIVE(val));
298  } else {
299  ++run;
300  }
301  }
302  }
303 }
304 
/* Copy an 8x8 block of 16-bit samples from a strided image into a
 * contiguous coefficient block: each row is 16 bytes, moved as two
 * 64-bit loads/stores. */
static void get(uint8_t *pixels, int stride, int16_t* block)
{
    int row = 8;

    while (row--) {
        AV_WN64(block,     AV_RN64(pixels));
        AV_WN64(block + 4, AV_RN64(pixels + 8));
        pixels += stride;
        block  += 8;
    }
}
316 
/* Fetch one 8x8 block from the image and forward-DCT it in place. */
static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t* block)
{
    get(pixels, stride, block);
    fdsp->fdct(block);
}
322 
/**
 * Forward-DCT one row of macroblocks of a plane into 'blocks'.
 *
 * Samples are 16-bit, so an 8x8 block spans 16 bytes horizontally.
 * Block emission order per 16x16 macroblock:
 *   - luma:       top-left, top-right, bottom-left, bottom-right
 *   - chroma 422: top, bottom (macroblock is 8 pixels wide)
 *   - chroma 444: top-left, bottom-left, top-right, bottom-right
 *                 (column-major, intentionally different from luma)
 */
static void calc_plane_dct(FDCTDSPContext *fdsp, uint8_t *src, int16_t * blocks, int src_stride, int mb_count, int chroma, int is_422)
{
    int16_t *block;
    int i;

    block = blocks;

    if (!chroma) { /* Luma plane */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                       src_stride, block + (0 << 6));
            fdct_get(fdsp, src + 16,                  src_stride, block + (1 << 6));
            fdct_get(fdsp, src +      8 * src_stride, src_stride, block + (2 << 6));
            fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));

            block += 256; /* 4 blocks x 64 coefficients */
            src   += 32;  /* 16 pixels right */
        }
    } else if (chroma && is_422){ /* chroma plane 422 */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                  src_stride, block + (0 << 6));
            fdct_get(fdsp, src + 8 * src_stride, src_stride, block + (1 << 6));
            block += (256 >> 1); /* only 2 blocks per MB */
            src   += (32 >> 1);  /* 8 pixels right */
        }
    } else { /* chroma plane 444 */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                       src_stride, block + (0 << 6));
            fdct_get(fdsp, src +      8 * src_stride, src_stride, block + (1 << 6));
            fdct_get(fdsp, src + 16,                  src_stride, block + (2 << 6));
            fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));

            block += 256;
            src += 32;
        }
    }
}
359 
360 static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma,
361  const uint8_t ff_prores_scan[64])
362 {
363  int blocks_per_slice;
364  PutBitContext pb;
365 
366  blocks_per_slice = mb_count << (2 - sub_sample_chroma);
367  init_put_bits(&pb, buf, buf_size);
368 
369  encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
370  encode_ac_coeffs(&pb, blocks, blocks_per_slice, qmat, ff_prores_scan);
371 
372  flush_put_bits(&pb);
373  return put_bits_ptr(&pb) - pb.buf;
374 }
375 
377  int16_t * blocks_y, int16_t * blocks_u, int16_t * blocks_v,
378  unsigned mb_count, uint8_t *buf, unsigned data_size,
379  unsigned* y_data_size, unsigned* u_data_size, unsigned* v_data_size,
380  int qp)
381 {
382  ProresContext* ctx = avctx->priv_data;
383 
384  *y_data_size = encode_slice_plane(blocks_y, mb_count,
385  buf, data_size, ctx->qmat_luma[qp - 1], 0, ctx->scantable);
386 
387  if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
388  *u_data_size = encode_slice_plane(blocks_u, mb_count, buf + *y_data_size, data_size - *y_data_size,
389  ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
390 
391  *v_data_size = encode_slice_plane(blocks_v, mb_count, buf + *y_data_size + *u_data_size,
392  data_size - *y_data_size - *u_data_size,
393  ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
394  }
395 
396  return *y_data_size + *u_data_size + *v_data_size;
397 }
398 
399 static void put_alpha_diff(PutBitContext *pb, int cur, int prev)
400 {
401  const int abits = 16;
402  const int dbits = 7;
403  const int dsize = 1 << dbits - 1;
404  int diff = cur - prev;
405 
406  diff = av_mod_uintp2(diff, abits);
407  if (diff >= (1 << abits) - dsize)
408  diff -= 1 << abits;
409  if (diff < -dsize || diff > dsize || !diff) {
410  put_bits(pb, 1, 1);
411  put_bits(pb, abits, diff);
412  } else {
413  put_bits(pb, 1, 0);
414  put_bits(pb, dbits - 1, FFABS(diff) - 1);
415  put_bits(pb, 1, diff < 0);
416  }
417 }
418 
419 static inline void put_alpha_run(PutBitContext *pb, int run)
420 {
421  if (run) {
422  put_bits(pb, 1, 0);
423  if (run < 0x10)
424  put_bits(pb, 4, run);
425  else
426  put_bits(pb, 15, run);
427  } else {
428  put_bits(pb, 1, 1);
429  }
430 }
431 
/**
 * Run-length encode the 16-bit alpha samples of one slice.
 *
 * The stream starts with a diff against the all-ones value (0xFFFF),
 * then alternating (run, diff) pairs, where 'run' counts samples equal
 * to their predecessor.  Returns 0 on success or AVERROR_BUG if the
 * output buffer overflowed.
 */
static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t * src_a,
                                                   unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned* a_data_size)
{
    const int abits = 16;
    const int mask = (1 << abits) - 1;       /* initial predictor: 0xFFFF */
    const int num_coeffs = mb_count * 256;   /* 16x16 samples per MB */
    int prev = mask, cur;
    int idx = 0;
    int run = 0;
    int16_t * blocks = (int16_t *)src_a;
    PutBitContext pb;
    init_put_bits(&pb, buf, data_size);

    /* first sample is always coded explicitly */
    cur = blocks[idx++];
    put_alpha_diff(&pb, cur, prev);
    prev = cur;
    do {
        cur = blocks[idx++];
        if (cur != prev) {
            put_alpha_run (&pb, run);
            put_alpha_diff(&pb, cur, prev);
            prev = cur;
            run = 0;
        } else {
            run++;
        }
    } while (idx < num_coeffs);
    /* flush a trailing run of identical samples */
    if (run)
        put_alpha_run(&pb, run);
    flush_put_bits(&pb);
    *a_data_size = put_bits_count(&pb) >> 3;

    if (put_bits_left(&pb) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Underestimated required buffer size.\n");
        return AVERROR_BUG;
    } else {
        return 0;
    }
}
472 
/**
 * Copy a sub-rectangle of a plane into a tightly packed buffer,
 * replicating the rightmost column and bottom row to pad out to
 * dst_width x dst_height (used for slices overlapping the border).
 *
 * 'stride' is in bytes; for interlaced input it is treated as covering
 * two frame lines and only the selected field is read.  Alpha samples
 * are widened from 10 to 16 bits while copying.
 *
 * NOTE(review): assumes x < width and y < (field) height so that
 * box_width/box_height >= 1 — confirm before reusing elsewhere.
 */
static inline void subimage_with_fill_template(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_alpha_plane,
        int is_interlaced, int is_top_field)
{
    int box_width = width - x < dst_width ? width - x : dst_width;
    int i, j, src_stride, box_height;
    uint16_t last_pix, *last_line;

    if (!is_interlaced) {
        src_stride = stride >> 1; /* bytes -> 16-bit samples */
        src += y * src_stride + x;
        box_height = height - y < dst_height ? height - y : dst_height;
    } else {
        src_stride = stride; /* 2 lines stride */
        src += y * src_stride + x;
        box_height = height / 2 - y < dst_height ? height / 2 - y : dst_height;
        if (!is_top_field) /* bottom field starts one frame line down */
            src += stride >> 1;
    }

    for (i = 0; i < box_height; ++i) {
        for (j = 0; j < box_width; ++j) {
            if (!is_alpha_plane) {
                dst[j] = src[j];
            } else {
                dst[j] = src[j] << 6; /* alpha 10b to 16b */
            }
        }
        /* dst[j - 1] already holds the widened value; the old code
         * shifted alpha a second time here, corrupting the edge fill */
        last_pix = dst[j - 1];
        for (; j < dst_width; j++)
            dst[j] = last_pix; /* replicate rightmost column */
        src += src_stride;
        dst += dst_width;
    }
    /* replicate the last written row down to dst_height */
    last_line = dst - dst_width;
    for (; i < dst_height; i++) {
        for (j = 0; j < dst_width; ++j) {
            dst[j] = last_line[j];
        }
        dst += dst_width;
    }
}
520 
/* Extract/pad a luma or chroma sub-image (no bit-depth conversion). */
static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 0, is_interlaced, is_top_field);
}
527 
/* Extract/pad the alpha sub-image, widening samples from 10 to 16 bits. */
static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 1, is_interlaced, is_top_field);
}
535 
/**
 * Encode one slice: luma, both chroma planes and, for 4444 input with
 * alpha, the alpha plane.
 *
 * Interior slices DCT straight from the frame; border ("unsafe") slices
 * are first copied into edge-padded scratch buffers.  The quantizer *qp
 * is adapted so the slice size lands between low_bytes and high_bytes,
 * and the chosen value carries over to the next slice.
 *
 * Returns the total slice size in bytes, or a negative error code.
 */
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
        int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
        int unsafe, int *qp, int is_interlaced, int is_top_field)
{
    int luma_stride, chroma_stride, alpha_stride = 0;
    ProresContext* ctx = avctx->priv_data;
    int hdr_size = 6 + (ctx->need_alpha * 2); /* v_data_size is written only when alpha is present */
    int ret = 0, slice_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0, a_data_size = 0;
    FDCTDSPContext *fdsp = &ctx->fdsp;
    /* per-slice byte budget derived from the per-profile bitrate table */
    int tgt_bits = (mb_count * bitrate_table[avctx->profile]) >> 2;
    int low_bytes = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
    int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;

    LOCAL_ALIGNED(16, int16_t, blocks_y, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_u, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_v, [DEFAULT_SLICE_MB_WIDTH << 8]);

    luma_stride = pic->linesize[0];
    chroma_stride = pic->linesize[1];

    if (ctx->need_alpha)
        alpha_stride = pic->linesize[3];

    /* locate the slice's top-left sample in each plane */
    if (!is_interlaced) {
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
    } else {
        /* field encoding: one field line spans two frame lines */
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride * 2 + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        if (!is_top_field){ /* bottom field, offset dest */
            dest_y += luma_stride;
            dest_u += chroma_stride;
            dest_v += chroma_stride;
        }
    }

    if (unsafe) {
        /* border slice: copy into edge-padded scratch buffers first */
        subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
                luma_stride, avctx->width, avctx->height,
                (uint16_t *) ctx->fill_y, mb_count << 4, 16, is_interlaced, is_top_field);
        subimage_with_fill((uint16_t *) pic->data[1], mb_x << (4 - ctx->is_422), mb_y << 4,
                chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                (uint16_t *) ctx->fill_u, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);
        subimage_with_fill((uint16_t *) pic->data[2], mb_x << (4 - ctx->is_422), mb_y << 4,
                chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                (uint16_t *) ctx->fill_v, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);

        /* no need for interlaced special case, data already reorganized in subimage_with_fill */
        calc_plane_dct(fdsp, ctx->fill_y, blocks_y, mb_count << 5, mb_count, 0, 0);
        calc_plane_dct(fdsp, ctx->fill_u, blocks_u, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);
        calc_plane_dct(fdsp, ctx->fill_v, blocks_v, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                mb_count, buf + hdr_size, data_size - hdr_size,
                &y_data_size, &u_data_size, &v_data_size,
                *qp);
    } else {
        if (!is_interlaced) {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride, mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride, mb_count, 1, ctx->is_422);
        } else {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride * 2, mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride * 2, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride * 2, mb_count, 1, ctx->is_422);
        }

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                mb_count, buf + hdr_size, data_size - hdr_size,
                &y_data_size, &u_data_size, &v_data_size,
                *qp);

        /* requantize until the slice fits the [low_bytes, high_bytes] window
         * (qp limits are per-profile) */
        if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
            do {
                *qp += 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                        mb_count, buf + hdr_size, data_size - hdr_size,
                        &y_data_size, &u_data_size, &v_data_size,
                        *qp);
            } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
        } else if (slice_size < low_bytes && *qp
                > qp_start_table[avctx->profile]) {
            do {
                *qp -= 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                        mb_count, buf + hdr_size, data_size - hdr_size,
                        &y_data_size, &u_data_size, &v_data_size,
                        *qp);
            } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
        }
    }

    /* slice header: header size (in bits), qp and per-plane payload sizes */
    buf[0] = hdr_size << 3;
    buf[1] = *qp;
    AV_WB16(buf + 2, y_data_size);
    AV_WB16(buf + 4, u_data_size);

    if (ctx->need_alpha) {
        AV_WB16(buf + 6, v_data_size); /* write v data size only if there is alpha */

        subimage_alpha_with_fill((uint16_t *) pic->data[3], mb_x << 4, mb_y << 4,
                alpha_stride, avctx->width, avctx->height,
                (uint16_t *) ctx->fill_a, mb_count << 4, 16, is_interlaced, is_top_field);
        ret = encode_alpha_slice_data(avctx, ctx->fill_a, mb_count,
                buf + hdr_size + slice_size,
                data_size - hdr_size - slice_size, &a_data_size);
    }

    if (ret != 0) {
        return ret;
    }
    return hdr_size + y_data_size + u_data_size + v_data_size + a_data_size;
}
653 
/**
 * Encode one picture (a progressive frame or a single field).
 *
 * Output layout: 8-byte picture header, a table of big-endian 16-bit
 * slice sizes, then the slice payloads in raster order.  Returns the
 * number of bytes written or a negative error code.
 */
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
        uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
{
    ProresContext *ctx = avctx->priv_data;
    int mb_width = (avctx->width + 15) >> 4;
    int hdr_size, sl_size, i;
    int mb_y, sl_data_size, qp, mb_height, picture_height, unsafe_mb_height_limit;
    int unsafe_bot, unsafe_right;
    uint8_t *sl_data, *sl_data_sizes;
    int slice_per_line = 0, rem = mb_width;

    if (!ctx->is_interlaced) { /* progressive encoding */
        mb_height = (avctx->height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    } else {
        if (is_top_field) {
            picture_height = (avctx->height + 1) / 2; /* top field gets the extra line */
        } else {
            picture_height = avctx->height / 2;
        }
        mb_height = (picture_height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    }

    /* slices per MB row: full-width slices plus a halving tail */
    for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
        slice_per_line += rem >> i;
        rem &= (1 << i) - 1;
    }

    qp = qp_start_table[avctx->profile];
    hdr_size = 8; sl_data_size = buf_size - hdr_size;
    sl_data_sizes = buf + hdr_size;
    /* slice payloads start right after the slice-size table */
    sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        int mb_x = 0;
        int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
        while (mb_x < mb_width) {
            /* halve the slice width until it fits the remaining MBs */
            while (mb_width - mb_x < slice_mb_count)
                slice_mb_count >>= 1;

            /* slices touching a partial bottom/right MB use the padded path */
            unsafe_bot = (avctx->height & 0xf) && (mb_y == unsafe_mb_height_limit - 1);
            unsafe_right = (avctx->width & 0xf) && (mb_x + slice_mb_count == mb_width);

            sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
                    sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp, ctx->is_interlaced, is_top_field);
            if (sl_size < 0){
                return sl_size;
            }

            bytestream_put_be16(&sl_data_sizes, sl_size);
            sl_data += sl_size;
            sl_data_size -= sl_size;
            mb_x += slice_mb_count;
        }
    }

    buf[0] = hdr_size << 3;
    AV_WB32(buf + 1, sl_data - buf);               /* picture data size */
    AV_WB16(buf + 5, slice_per_line * mb_height);  /* number of slices */
    buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4; /* log2 of the slice width in MBs */

    return sl_data - buf;
}
717 
719  const AVFrame *pict, int *got_packet)
720 {
721  ProresContext *ctx = avctx->priv_data;
722  int header_size = 148;
723  uint8_t *buf;
724  int compress_frame_size, pic_size, ret, is_top_field_first = 0;
725  uint8_t frame_flags;
726  int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + AV_INPUT_BUFFER_MIN_SIZE; //FIXME choose tighter limit
727 
728 
729  if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
730  return ret;
731 
732  buf = pkt->data;
733  compress_frame_size = 8 + header_size;
734 
735  bytestream_put_be32(&buf, compress_frame_size);/* frame size will be update after picture(s) encoding */
736  bytestream_put_buffer(&buf, "icpf", 4);
737 
738  bytestream_put_be16(&buf, header_size);
739  bytestream_put_be16(&buf, 0); /* version */
740  bytestream_put_buffer(&buf, ctx->vendor, 4);
741  bytestream_put_be16(&buf, avctx->width);
742  bytestream_put_be16(&buf, avctx->height);
743  frame_flags = 0x82; /* 422 not interlaced */
744  if (avctx->profile >= FF_PROFILE_PRORES_4444) /* 4444 or 4444 Xq */
745  frame_flags |= 0x40; /* 444 chroma */
746  if (ctx->is_interlaced) {
747  if (pict->top_field_first || !pict->interlaced_frame) { /* tff frame or progressive frame interpret as tff */
748  av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, top field first\n");
749  frame_flags |= 0x04; /* interlaced tff */
750  is_top_field_first = 1;
751  } else {
752  av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, bottom field first\n");
753  frame_flags |= 0x08; /* interlaced bff */
754  }
755  } else {
756  av_log(avctx, AV_LOG_DEBUG, "use progressive encoding\n");
757  }
758  *buf++ = frame_flags;
759  *buf++ = 0; /* reserved */
760  /* only write color properties, if valid value. set to unspecified otherwise */
761  *buf++ = ff_int_from_list_or_default(avctx, "frame color primaries", pict->color_primaries, valid_primaries, 0);
762  *buf++ = ff_int_from_list_or_default(avctx, "frame color trc", pict->color_trc, valid_trc, 0);
763  *buf++ = ff_int_from_list_or_default(avctx, "frame colorspace", pict->colorspace, valid_colorspace, 0);
764  if (avctx->profile >= FF_PROFILE_PRORES_4444) {
765  if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) {
766  *buf++ = 0xA0;/* src b64a and no alpha */
767  } else {
768  *buf++ = 0xA2;/* src b64a and 16b alpha */
769  }
770  } else {
771  *buf++ = 32;/* src v210 and no alpha */
772  }
773  *buf++ = 0; /* reserved */
774  *buf++ = 3; /* luma and chroma matrix present */
775 
776  bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64);
777  bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);
778 
779  pic_size = prores_encode_picture(avctx, pict, buf,
780  pkt->size - compress_frame_size, 0, is_top_field_first);/* encode progressive or first field */
781  if (pic_size < 0) {
782  return pic_size;
783  }
784  compress_frame_size += pic_size;
785 
786  if (ctx->is_interlaced) { /* encode second field */
787  pic_size = prores_encode_picture(avctx, pict, pkt->data + compress_frame_size,
788  pkt->size - compress_frame_size, 1, !is_top_field_first);
789  if (pic_size < 0) {
790  return pic_size;
791  }
792  compress_frame_size += pic_size;
793  }
794 
795  AV_WB32(pkt->data, compress_frame_size);/* update frame size */
796  pkt->flags |= AV_PKT_FLAG_KEY;
797  pkt->size = compress_frame_size;
798  *got_packet = 1;
799 
800  return 0;
801 }
802 
/* Multiply all 64 entries of a base quant matrix by 'scale', widening
 * them to int. */
static void scale_mat(const uint8_t* src, int* dst, int scale)
{
    int idx;

    for (idx = 0; idx < 64; idx++)
        dst[idx] = scale * src[idx];
}
809 
811 {
812  int i;
813  ProresContext* ctx = avctx->priv_data;
814 
815  avctx->bits_per_raw_sample = 10;
816  ctx->need_alpha = 0;
818  if (ctx->is_interlaced) {
820  } else {
822  }
823 
824  if (avctx->width & 0x1) {
825  av_log(avctx, AV_LOG_ERROR,
826  "frame width needs to be multiple of 2\n");
827  return AVERROR(EINVAL);
828  }
829 
830  if (avctx->width > 65534 || avctx->height > 65535) {
831  av_log(avctx, AV_LOG_ERROR,
832  "The maximum dimensions are 65534x65535\n");
833  return AVERROR(EINVAL);
834  }
835 
836  if (strlen(ctx->vendor) != 4) {
837  av_log(avctx, AV_LOG_ERROR, "vendor ID should be 4 bytes\n");
838  return AVERROR(EINVAL);
839  }
840 
841  if (avctx->profile == FF_PROFILE_UNKNOWN) {
842  if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
844  av_log(avctx, AV_LOG_INFO,
845  "encoding with ProRes standard (apcn) profile\n");
846  } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) {
848  av_log(avctx, AV_LOG_INFO,
849  "encoding with ProRes 4444 (ap4h) profile\n");
850  } else if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
852  av_log(avctx, AV_LOG_INFO,
853  "encoding with ProRes 4444+ (ap4h) profile\n");
854  } else {
855  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format\n");
856  return AVERROR(EINVAL);
857  }
858  } else if (avctx->profile < FF_PROFILE_PRORES_PROXY
859  || avctx->profile > FF_PROFILE_PRORES_XQ) {
860  av_log(
861  avctx,
862  AV_LOG_ERROR,
863  "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch, 4 - ap4h, 5 - ap4x]\n",
864  avctx->profile);
865  return AVERROR(EINVAL);
866  } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P10) && (avctx->profile > FF_PROFILE_PRORES_HQ)){
867  av_log(avctx, AV_LOG_ERROR,
868  "encoding with ProRes 444/Xq (ap4h/ap4x) profile, need YUV444P10 input\n");
869  return AVERROR(EINVAL);
870  } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10)
871  && (avctx->profile < FF_PROFILE_PRORES_4444)){
872  av_log(avctx, AV_LOG_ERROR,
873  "encoding with ProRes Proxy/LT/422/422 HQ (apco, apcs, apcn, ap4h) profile, need YUV422P10 input\n");
874  return AVERROR(EINVAL);
875  }
876 
877  if (avctx->profile < FF_PROFILE_PRORES_4444) { /* 422 versions */
878  ctx->is_422 = 1;
879  if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
880  ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
881  if (!ctx->fill_y)
882  return AVERROR(ENOMEM);
883  ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
884  ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
885  }
886  } else { /* 444 */
887  ctx->is_422 = 0;
888  if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
889  ctx->fill_y = av_malloc(3 * (DEFAULT_SLICE_MB_WIDTH << 9));
890  if (!ctx->fill_y)
891  return AVERROR(ENOMEM);
892  ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
893  ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 9);
894  }
895  if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
896  ctx->need_alpha = 1;
897  ctx->fill_a = av_malloc(DEFAULT_SLICE_MB_WIDTH << 9); /* 8 blocks x 16px x 16px x sizeof (uint16) */
898  if (!ctx->fill_a)
899  return AVERROR(ENOMEM);
900  }
901  }
902 
903  ff_fdctdsp_init(&ctx->fdsp, avctx);
904 
905  avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);
906 
907  for (i = 1; i <= 16; i++) {
908  scale_mat(QMAT_LUMA[avctx->profile] , ctx->qmat_luma[i - 1] , i);
909  scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
910  }
911 
912  return 0;
913 }
914 
916 {
917  ProresContext* ctx = avctx->priv_data;
918  av_freep(&ctx->fill_y);
919  av_freep(&ctx->fill_a);
920 
921  return 0;
922 }
923 
#define OFFSET(x) offsetof(ProresContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Private options shared by both registered encoder entries. */
static const AVOption options[] = {
    /* 4-byte vendor tag written into the frame header (default "fmpg") */
    { "vendor", "vendor ID", OFFSET(vendor), AV_OPT_TYPE_STRING, { .str = "fmpg" }, 0, 0, VE },
    { NULL }
};
931 
/* AVClass for the "prores_aw" encoder. */
static const AVClass proresaw_enc_class = {
    .class_name = "ProResAw encoder",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
938 
/* AVClass for the "prores" encoder. */
static const AVClass prores_enc_class = {
    .class_name = "ProRes encoder",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
945 
947  .name = "prores_aw",
948  .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
949  .type = AVMEDIA_TYPE_VIDEO,
950  .id = AV_CODEC_ID_PRORES,
951  .priv_data_size = sizeof(ProresContext),
953  .close = prores_encode_close,
954  .encode2 = prores_encode_frame,
956  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
957  .priv_class = &proresaw_enc_class,
959 };
960 
962  .name = "prores",
963  .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
964  .type = AVMEDIA_TYPE_VIDEO,
965  .id = AV_CODEC_ID_PRORES,
966  .priv_data_size = sizeof(ProresContext),
968  .close = prores_encode_close,
969  .encode2 = prores_encode_frame,
971  .capabilities = AV_CODEC_CAP_FRAME_THREADS,
972  .priv_class = &prores_enc_class,
974 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
#define NULL
Definition: coverity.c:32
static const unsigned codebook[256][2]
Definition: cfhdenc.c:41
static const AVClass prores_enc_class
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
AVOption.
Definition: opt.h:248
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size, int unsafe, int *qp, int is_interlaced, int is_top_field)
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic, uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
uint8_t qmat_luma[64]
Definition: proresdec.h:43
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
static void put_sbits(PutBitContext *pb, int n, int32_t value)
Definition: put_bits.h:258
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
#define TO_GOLOMB2(val, sign)
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:473
int size
Definition: packet.h:364
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
static void encode_ac_coeffs(PutBitContext *pb, int16_t *in, int blocks_per_slice, int *qmat, const uint8_t ff_prores_scan[64])
int av_log2(unsigned v)
Definition: intmath.c:26
static av_cold int prores_encode_init(AVCodecContext *avctx)
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static const AVOption options[]
#define FF_PROFILE_PRORES_LT
Definition: avcodec.h:1965
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:472
static void calc_plane_dct(FDCTDSPContext *fdsp, uint8_t *src, int16_t *blocks, int src_stride, int mb_count, int chroma, int is_422)
uint8_t run
Definition: svq3.c:204
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
#define FIRST_DC_CB
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
int profile
profile
Definition: avcodec.h:1859
AVCodec.
Definition: codec.h:190
static const uint8_t QMAT_CHROMA[6][64]
#define FF_PROFILE_PRORES_XQ
Definition: avcodec.h:1969
static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t *src_a, unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned *a_data_size)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define VE
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
float delta
AVOptions.
static void encode_dc_coeffs(PutBitContext *pb, int16_t *in, int blocks_per_slice, int *qmat)
#define height
uint8_t * data
Definition: packet.h:363
static const int qp_start_table[]
const uint8_t * scantable
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
#define AV_INPUT_BUFFER_MIN_SIZE
minimum encoding buffer size Used to avoid some checks during header writing.
Definition: avcodec.h:222
#define FFALIGN(x, a)
Definition: macros.h:48
const AVProfile ff_prores_profiles[]
Definition: profiles.c:153
#define av_log(a,...)
static const int bitrate_table[]
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:401
static const uint8_t lev_to_cb[10]
#define src
Definition: vp8dsp.c:254
static const int valid_primaries[]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:347
static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
static int put_bits_left(PutBitContext *s)
Definition: put_bits.h:107
static const uint16_t mask[17]
Definition: lzw.c:38
static av_always_inline void chroma(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset_y, int offset_x, int column, int mirror, int jobnr, int nb_jobs)
Definition: vf_waveform.c:1631
static void put_alpha_run(PutBitContext *pb, int run)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
static av_cold int prores_encode_close(AVCodecContext *avctx)
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
uint8_t * buf
Definition: put_bits.h:52
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:460
const char * name
Name of the codec implementation.
Definition: codec.h:197
#define IS_NEGATIVE(val)
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
static const int valid_trc[]
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
int8_t exp
Definition: eval.c:72
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:81
static av_always_inline int get_level(int val)
#define TO_GOLOMB(val)
SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems.
Definition: pixfmt.h:500
#define FFMIN(a, b)
Definition: common.h:96
static void scale_mat(const uint8_t *src, int *dst, int scale)
static void put_alpha_diff(PutBitContext *pb, int cur, int prev)
static const AVProfile profiles[]
#define width
int width
picture width / height.
Definition: avcodec.h:699
#define FF_PROFILE_PRORES_STANDARD
Definition: avcodec.h:1966
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:1860
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:523
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:465
AVFormatContext * ctx
Definition: movenc.c:48
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define DEFAULT_SLICE_MB_WIDTH
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:438
AVCodec ff_prores_encoder
#define FF_PROFILE_PRORES_4444
Definition: avcodec.h:1968
also ITU-R BT1361
Definition: pixfmt.h:485
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
const uint8_t ff_prores_interlaced_scan[64]
Definition: proresdata.c:36
int frame_size
Definition: mxfenc.c:2166
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma, const uint8_t ff_prores_scan[64])
main external API structure.
Definition: avcodec.h:526
FDCTDSPContext fdsp
static const uint8_t dc_codebook[7]
const uint8_t ff_prores_progressive_scan[64]
Definition: proresdata.c:25
static const int qp_end_table[]
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:551
#define OFFSET(x)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
#define AV_WB32(p, v)
Definition: intreadwrite.h:419
const char * name
short name for the profile
Definition: codec.h:178
static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx, int16_t *blocks_y, int16_t *blocks_u, int16_t *blocks_v, unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned *y_data_size, unsigned *u_data_size, unsigned *v_data_size, int qp)
#define FF_PROFILE_PRORES_HQ
Definition: avcodec.h:1967
#define QSCALE(qmat, ind, val)
static const AVClass proresaw_enc_class
static void encode_codeword(PutBitContext *pb, int val, int codebook)
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
uint8_t level
Definition: svq3.c:205
static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t *block)
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
AVCodec ff_prores_aw_encoder
static const uint8_t QMAT_LUMA[6][64]
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:115
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:114
AVProfile.
Definition: codec.h:176
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
Definition: bytestream.h:368
ARIB STD-B67, known as "Hybrid log-gamma".
Definition: pixfmt.h:504
void * priv_data
Definition: avcodec.h:553
static void subimage_with_fill_template(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_alpha_plane, int is_interlaced, int is_top_field)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define xf(width, name, var, range_min, range_max, subs,...)
Definition: cbs_av1.c:663
uint8_t qmat_chroma[64]
Definition: proresdec.h:44
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:460
#define AV_RN64(p)
Definition: intreadwrite.h:368
enum AVColorPrimaries color_primaries
Definition: frame.h:554
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:466
ITU-R BT2020.
Definition: pixfmt.h:469
#define FF_PROFILE_PRORES_PROXY
Definition: avcodec.h:1964
#define av_freep(p)
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:556
#define av_always_inline
Definition: attributes.h:45
static const int valid_colorspace[]
int ff_int_from_list_or_default(void *ctx, const char *val_name, int val, const int *array_valid_values, int default_value)
Check if a value is in the list.
Definition: utils.c:2308
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
#define DIFF_SIGN(val, sign)
static const uint8_t run_to_cb[16]
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
int i
Definition: input.c:407
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
bitstream writer API