FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mpegvideo_unquantize.h"
63 #include "mjpegenc.h"
64 #include "speedhqenc.h"
65 #include "msmpeg4enc.h"
66 #include "pixblockdsp.h"
67 #include "qpeldsp.h"
68 #include "faandct.h"
69 #include "aandcttab.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "rv10enc.h"
76 #include "libavutil/refstruct.h"
77 #include <limits.h>
78 #include "sp5x.h"
79 
80 #define QUANT_BIAS_SHIFT 8
81 
82 #define QMAT_SHIFT_MMX 16
83 #define QMAT_SHIFT 21
84 
85 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
86 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
87 static int sse_mb(MPVEncContext *const s);
88 static int dct_quantize_c(MPVEncContext *const s,
89  int16_t *block, int n,
90  int qscale, int *overflow);
91 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
92 
93 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
94 
95 static const AVOption mpv_generic_options[] = {
98  { NULL },
99 };
100 
102  .class_name = "generic mpegvideo encoder",
103  .item_name = av_default_item_name,
104  .option = mpv_generic_options,
105  .version = LIBAVUTIL_VERSION_INT,
106 };
107 
/**
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax]
 * from an 8x8 quantization matrix, so the quantizers can use a
 * multiply-and-shift instead of a per-coefficient division.
 *
 * @param s            encoder context; supplies the selected FDCT
 *                     (which determines the scaling branch taken) and the
 *                     IDCT permutation applied to matrix indices
 * @param qmat         output: 64 32-bit multipliers per qscale
 * @param qmat16       output: 16-bit multiplier/bias pairs for the
 *                     16-bit (MMX-style) quantizer; only filled in the
 *                     generic (last) branch
 * @param quant_matrix input 8x8 quantization matrix
 * @param bias         rounding bias, in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to compute (inclusive)
 * @param qmax         last qscale to compute (inclusive)
 * @param intra        nonzero to skip coefficient 0 (DC) in the
 *                     overflow check loop
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* MPEG-2 style non-linear qscale maps through a table;
         * otherwise the effective scale is simply 2*qscale. */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else                   qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            /* NOTE(review): the final operand of this condition and its
             * closing ") {" are missing from this copy of the file —
             * TODO restore from upstream before building. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            /* The AAN fast DCT leaves per-coefficient scale factors in the
             * output, so they are folded into the quantization multiplier. */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 * 1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 * 55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            /* Generic branch: also fills the 16-bit tables used by the
             * SIMD quantizer ([0] = multiplier, [1] = rounding bias). */
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146
                 *
                 * 1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 * 131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Clamp to keep the 16-bit multiplier nonzero and below
                 * 0x8000, which the 16-bit quantizer requires. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Determine how far QMAT_SHIFT would have to be reduced to keep
         * (max coefficient) * (multiplier) within int range. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
194 
/**
 * Derive the quantizer (s->c.qscale) and lambda2 from the current lambda.
 *
 * The qscale is computed as roughly lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
 * clipped to [qmin, qmax] (qmax is relaxed to 31 while the rate control is
 * in VBV "ignore qmax" mode).
 */
static inline void update_qscale(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    /* NOTE(review): "&& 0" makes this branch dead code — the search over
     * the MPEG-2 non-linear qscale table is disabled; presumably kept for
     * experimentation. TODO confirm against upstream. */
    if (s->c.q_scale_type == 1 && 0) {
        int i;
        int bestdiff=INT_MAX;
        int best = 1;

        /* Pick the table entry whose effective scale best matches lambda,
         * honouring qmin always and qmax unless VBV overrides it. */
        for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
            if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
                (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
                continue;
            if (diff < bestdiff) {
                bestdiff = diff;
                best = i;
            }
        }
        s->c.qscale = best;
    } else {
        s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                      (FF_LAMBDA_SHIFT + 7);
        s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
    }

    /* NOTE(review): the right-hand side of this shift is missing from this
     * copy of the file (scrape gap) — TODO restore from upstream. */
    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
}
224 
226 {
227  int i;
228 
229  if (matrix) {
230  put_bits(pb, 1, 1);
231  for (i = 0; i < 64; i++) {
233  }
234  } else
235  put_bits(pb, 1, 0);
236 }
237 
238 /**
239  * init s->c.cur_pic.qscale_table from s->lambda_table
240  */
241 static void init_qscale_tab(MPVEncContext *const s)
242 {
243  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
244 
245  for (int i = 0; i < s->c.mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
249  s->c.avctx->qmax);
250  }
251 }
252 
254  const MPVEncContext *const src)
255 {
256 #define COPY(a) dst->a = src->a
257  COPY(c.pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(c.qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(c.progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MPVEncContext to defaults for encoding.
277  */
279 {
280  MPVEncContext *const s = &m->s;
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
285  s->f_code = 1;
286  s->b_code = 1;
287 
288  if (!m->fcode_tab) {
290  ff_thread_once(&init_static_once, mpv_encode_init_static);
291  }
292  if (!s->c.y_dc_scale_table) {
293  s->c.y_dc_scale_table =
294  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
295  }
296 }
297 
299 {
300  s->dct_quantize = dct_quantize_c;
301 
302 #if ARCH_X86
304 #endif
305 
306  if (s->c.avctx->trellis)
307  s->dct_quantize = dct_quantize_trellis_c;
308 }
309 
310 static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
311 {
312  MpegEncContext *const s = &s2->c;
313  MPVUnquantDSPContext unquant_dsp_ctx;
314 
315  ff_mpv_unquantize_init(&unquant_dsp_ctx,
316  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
317 
318  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
319  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
320  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
321  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
322  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
323  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
324  } else {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
327  }
328 }
329 
/* NOTE(review): the function signature line is missing from this copy of the
 * file (scrape gap). From the call site below ("ret = me_cmp_init(m, avctx)")
 * this takes (MPVMainEncContext *m, AVCodecContext *avctx) and returns an
 * int error code — TODO confirm against upstream.
 *
 * Sets up the motion-estimation/mode-decision compare functions:
 * frame-skip comparison, optional interlaced-DCT comparison, and the
 * SSE/SAD/NSSE function pointers copied from MECmpContext. */
{
    MPVEncContext *const s = &m->s;
    MECmpContext mecc;
    me_cmp_func me_cmp[6];
    int ret;

    ff_me_cmp_init(&mecc, avctx);
    ret = ff_me_init(&s->me, avctx, &mecc, 1);
    if (ret < 0)
        return ret;
    /* Resolve the user-selected frame-skip metric; index 1 is the
     * variant actually stored for later use. */
    ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
    if (ret < 0)
        return ret;
    m->frame_skip_cmp_fn = me_cmp[1];
    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
        ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
        if (ret < 0)
            return ret;
        /* Both required variants (indices 0 and 4) must exist for the
         * chosen ildct metric. */
        if (!me_cmp[0] || !me_cmp[4])
            return AVERROR(EINVAL);
        s->ildct_cmp[0] = me_cmp[0];
        s->ildct_cmp[1] = me_cmp[4];
    }

    s->sum_abs_dctelem = mecc.sum_abs_dctelem;

    s->sse_cmp[0] = mecc.sse[0];
    s->sse_cmp[1] = mecc.sse[1];
    s->sad_cmp[0] = mecc.sad[0];
    s->sad_cmp[1] = mecc.sad[1];
    /* NSSE is only used when explicitly requested as the MB metric;
     * otherwise plain SSE serves as the "noise-aware" comparison too. */
    if (avctx->mb_cmp == FF_CMP_NSSE) {
        s->n_sse_cmp[0] = mecc.nsse[0];
        s->n_sse_cmp[1] = mecc.nsse[1];
    } else {
        s->n_sse_cmp[0] = mecc.sse[0];
        s->n_sse_cmp[1] = mecc.sse[1];
    }

    return 0;
}
371 
/* Allocate `numb` arrays of `mult` elements each, zero-initialized. */
#define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
/* NOTE(review): the function signature line is missing from this copy of the
 * file (scrape gap); from the call site this is init_matrices(m, avctx)
 * returning an int error code — TODO confirm against upstream.
 *
 * Selects the default intra/inter quantization matrices for the codec,
 * applies any user-supplied override, stores them permuted for the IDCT,
 * and precomputes the multiplier tables via ff_convert_matrix(). */
{
    MPVEncContext *const s = &m->s;
    /* One matrix set always (intra); +1 chroma set for MJPEG;
     * +1 inter set unless encoding intra-only. */
    const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
    const uint16_t *intra_matrix, *inter_matrix;
    int ret;

    if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
        !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
        return AVERROR(ENOMEM);

    if (s->c.out_format == FMT_MJPEG) {
        s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
        // No need to set q_inter_matrix
        /* NOTE(review): one comment/code line is missing here in this copy
         * of the file (scrape gap). */
        // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
        return 0;
    } else {
        /* Non-MJPEG: chroma shares the luma intra matrix. */
        s->q_chroma_intra_matrix = s->q_intra_matrix;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
    }
    if (!m->intra_only) {
        s->q_inter_matrix = s->q_intra_matrix + 32;
        s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
    }

    if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
        s->mpeg_quant) {
        intra_matrix = ff_mpeg4_default_intra_matrix;
        inter_matrix = ff_mpeg4_default_non_intra_matrix;
    } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
        /* H.263/H.261 use a flat matrix for both intra and inter. */
        intra_matrix =
        inter_matrix = ff_mpeg1_default_non_intra_matrix;
    } else {
        /* MPEG-1/2, SpeedHQ */
        intra_matrix = ff_mpeg1_default_intra_matrix;
        inter_matrix = ff_mpeg1_default_non_intra_matrix;
    }
    /* User-provided matrices override the codec defaults. */
    if (avctx->intra_matrix)
        intra_matrix = avctx->intra_matrix;
    if (avctx->inter_matrix)
        inter_matrix = avctx->inter_matrix;

    /* init q matrix */
    for (int i = 0; i < 64; i++) {
        int j = s->c.idsp.idct_permutation[i];

        s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
        s->c.inter_matrix[j] = inter_matrix[i];
    }

    /* precompute matrix */
    /* NOTE(review): the call that assigns `ret` here is missing from this
     * copy of the file (scrape gap) — as written, `ret` is read
     * uninitialized. TODO restore from upstream. */
    if (ret < 0)
        return ret;

    ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                      s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
                      31, 1);
    if (s->q_inter_matrix)
        ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
                          s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);

    return 0;
}
440 
/* NOTE(review): the function signature line is missing from this copy of the
 * file (scrape gap); this is the motion-vector/MB-table allocator taking
 * (MPVMainEncContext *m) and returning an int error code — TODO confirm
 * against upstream.
 *
 * Allocates the MB type/variance arrays, the lambda table, the P/B motion
 * vector tables and (for MPEG-4 or interlaced ME) the field MV and field
 * select tables, carving them out of a few bulk allocations. */
{
    MPVEncContext *const s = &m->s;
    int has_b_frames = !!m->max_b_frames;
    int16_t (*mv_table)[2];

    /* Allocate MB type table */
    unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
    /* Single allocation: mb_type, mc_mb_var, mb_var (3 uint arrays) plus
     * the byte-sized mb_mean array appended at the end. */
    s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
    if (!s->mb_type)
        return AVERROR(ENOMEM);
    s->mc_mb_var = s->mb_type + mb_array_size;
    s->mb_var = s->mc_mb_var + mb_array_size;
    s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);

    if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
        return AVERROR(ENOMEM);

    /* Two extra rows plus one element so edge positions are addressable. */
    unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
    /* 1 P table; +5 B tables (forw/back/bidir-forw/bidir-back/direct). */
    unsigned nb_mv_tables = 1 + 5 * has_b_frames;
    if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
        (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* Field-based ME additionally needs 8 B field MV tables and the
         * field select tables. */
        nb_mv_tables += 8 * has_b_frames;
        s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
        if (!s->p_field_select_table[0])
            return AVERROR(ENOMEM);
        s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
    }

    mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
    if (!mv_table)
        return AVERROR(ENOMEM);
    m->mv_table_base = mv_table;
    /* Offset so the tables can be indexed with the same xy as the MB
     * arrays (row -1 / column -1 remain addressable). */
    mv_table += s->c.mb_stride + 1;

    s->p_mv_table = mv_table;
    if (has_b_frames) {
        s->b_forw_mv_table = mv_table += mv_table_size;
        s->b_back_mv_table = mv_table += mv_table_size;
        s->b_bidir_forw_mv_table = mv_table += mv_table_size;
        s->b_bidir_back_mv_table = mv_table += mv_table_size;
        s->b_direct_mv_table = mv_table += mv_table_size;

        if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
            uint8_t *field_select = s->p_field_select_table[1];
            for (int j = 0; j < 2; j++) {
                for (int k = 0; k < 2; k++) {
                    for (int l = 0; l < 2; l++)
                        s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
                    s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
                }
            }
        }
    }

    return 0;
}
498 
/* NOTE(review): the function signature line is missing from this copy of the
 * file (scrape gap); this is the per-slice buffer initializer taking
 * (MPVMainEncContext *m) and returning an int error code — TODO confirm
 * against upstream.
 *
 * Allocates the noise-reduction DCT error accumulators (cache-line aligned,
 * one chunk per slice context) and wires up the per-slice block pointers
 * and the dc_val/ac_val prediction-buffer offsets. */
{
    MPVEncContext *const s = &m->s;
    // Align the following per-thread buffers to avoid false sharing.
    enum {
#ifndef _MSC_VER
        /// The number is supposed to match/exceed the cache-line size.
        ALIGN = FFMAX(128, _Alignof(max_align_t)),
#else
        ALIGN = 128,
#endif
        DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
    };
    static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
                  "Need checks for potential overflow.");
    unsigned nb_slices = s->c.slice_context_count;
    char *dct_error = NULL;

    if (m->noise_reduction) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
            return AVERROR(ENOMEM);
        /* Over-allocate by ALIGN-1 so the base can be rounded up below. */
        dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
        if (!dct_error)
            return AVERROR(ENOMEM);
        /* NOTE(review): a line is missing here in this copy of the file
         * (scrape gap) — presumably storing the unaligned base pointer for
         * later freeing. TODO restore from upstream. */
        dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
    }

    const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
    const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
    const int yc_size = y_size + 2 * c_size;
    ptrdiff_t offset = 0;

    for (unsigned i = 0; i < nb_slices; ++i) {
        MPVEncContext *const s2 = s->c.enc_contexts[i];

        s2->block = s2->blocks[0];

        if (dct_error) {
            /* Each slice context gets its own aligned error-sum chunk but
             * shares the main context's dct_offset tables. */
            s2->dct_offset = s->dct_offset;
            s2->dct_error_sum = (void*)dct_error;
            dct_error += DCT_ERROR_SIZE;
        }

        if (s2->c.ac_val) {
            /* Advance each slice's DC/AC prediction pointers into its own
             * region of the shared buffers. */
            s2->c.dc_val += offset + i;
            s2->c.ac_val += offset;
            offset += yc_size;
        }
    }
    return 0;
}
551 
552 /* init video encoder */
554 {
555  MPVMainEncContext *const m = avctx->priv_data;
556  MPVEncContext *const s = &m->s;
557  AVCPBProperties *cpb_props;
558  int gcd, ret;
559 
561 
562  switch (avctx->pix_fmt) {
563  case AV_PIX_FMT_YUVJ444P:
564  case AV_PIX_FMT_YUV444P:
565  s->c.chroma_format = CHROMA_444;
566  break;
567  case AV_PIX_FMT_YUVJ422P:
568  case AV_PIX_FMT_YUV422P:
569  s->c.chroma_format = CHROMA_422;
570  break;
571  default:
572  av_unreachable("Already checked via CODEC_PIXFMTS");
573  case AV_PIX_FMT_YUVJ420P:
574  case AV_PIX_FMT_YUV420P:
575  s->c.chroma_format = CHROMA_420;
576  break;
577  }
578 
579  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
580 
581  m->bit_rate = avctx->bit_rate;
582  s->c.width = avctx->width;
583  s->c.height = avctx->height;
584  if (avctx->gop_size > 600 &&
586  av_log(avctx, AV_LOG_WARNING,
587  "keyframe interval too large!, reducing it from %d to %d\n",
588  avctx->gop_size, 600);
589  avctx->gop_size = 600;
590  }
591  m->gop_size = avctx->gop_size;
592  s->c.avctx = avctx;
593  if (avctx->max_b_frames > MPVENC_MAX_B_FRAMES) {
594  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
595  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
597  } else if (avctx->max_b_frames < 0) {
598  av_log(avctx, AV_LOG_ERROR,
599  "max b frames must be 0 or positive for mpegvideo based encoders\n");
600  return AVERROR(EINVAL);
601  }
602  m->max_b_frames = avctx->max_b_frames;
603  s->c.codec_id = avctx->codec->id;
604  if (m->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
605  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
606  return AVERROR(EINVAL);
607  }
608 
609  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
610  s->rtp_mode = !!s->rtp_payload_size;
611  s->c.intra_dc_precision = avctx->intra_dc_precision;
612 
613  // workaround some differences between how applications specify dc precision
614  if (s->c.intra_dc_precision < 0) {
615  s->c.intra_dc_precision += 8;
616  } else if (s->c.intra_dc_precision >= 8)
617  s->c.intra_dc_precision -= 8;
618 
619  if (s->c.intra_dc_precision < 0) {
620  av_log(avctx, AV_LOG_ERROR,
621  "intra dc precision must be positive, note some applications use"
622  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
623  return AVERROR(EINVAL);
624  }
625 
626  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
627  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
628  return AVERROR(EINVAL);
629  }
631 
632  if (m->gop_size <= 1) {
633  m->intra_only = 1;
634  m->gop_size = 12;
635  } else {
636  m->intra_only = 0;
637  }
638 
639  /* Fixed QSCALE */
640  m->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
641 
642  s->adaptive_quant = (avctx->lumi_masking ||
643  avctx->dark_masking ||
644  avctx->temporal_cplx_masking ||
645  avctx->spatial_cplx_masking ||
646  avctx->p_masking ||
647  m->border_masking ||
648  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
649  !m->fixed_qscale;
650 
651  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
652 
653  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
654  switch(avctx->codec_id) {
657  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
658  break;
659  case AV_CODEC_ID_MPEG4:
663  if (avctx->rc_max_rate >= 15000000) {
664  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
665  } else if(avctx->rc_max_rate >= 2000000) {
666  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
667  } else if(avctx->rc_max_rate >= 384000) {
668  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
669  } else
670  avctx->rc_buffer_size = 40;
671  avctx->rc_buffer_size *= 16384;
672  break;
673  }
674  if (avctx->rc_buffer_size) {
675  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
676  }
677  }
678 
679  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
680  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
681  return AVERROR(EINVAL);
682  }
683 
684  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
685  av_log(avctx, AV_LOG_INFO,
686  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
687  }
688 
689  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
690  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
691  return AVERROR(EINVAL);
692  }
693 
694  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
695  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
696  return AVERROR(EINVAL);
697  }
698 
699  if (avctx->rc_max_rate &&
700  avctx->rc_max_rate == avctx->bit_rate &&
701  avctx->rc_max_rate != avctx->rc_min_rate) {
702  av_log(avctx, AV_LOG_INFO,
703  "impossible bitrate constraints, this will fail\n");
704  }
705 
706  if (avctx->rc_buffer_size &&
707  avctx->bit_rate * (int64_t)avctx->time_base.num >
708  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
709  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
710  return AVERROR(EINVAL);
711  }
712 
713  if (!m->fixed_qscale &&
714  avctx->bit_rate * av_q2d(avctx->time_base) >
715  avctx->bit_rate_tolerance) {
716  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
717  av_log(avctx, AV_LOG_WARNING,
718  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
719  if (nbt <= INT_MAX) {
720  avctx->bit_rate_tolerance = nbt;
721  } else
722  avctx->bit_rate_tolerance = INT_MAX;
723  }
724 
725  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
726  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
727  s->c.codec_id != AV_CODEC_ID_FLV1) {
728  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
729  return AVERROR(EINVAL);
730  }
731 
732  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
733  av_log(avctx, AV_LOG_ERROR,
734  "OBMC is only supported with simple mb decision\n");
735  return AVERROR(EINVAL);
736  }
737 
738  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
739  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
740  return AVERROR(EINVAL);
741  }
742 
743  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
744  s->c.codec_id == AV_CODEC_ID_H263 ||
745  s->c.codec_id == AV_CODEC_ID_H263P) &&
746  (avctx->sample_aspect_ratio.num > 255 ||
747  avctx->sample_aspect_ratio.den > 255)) {
748  av_log(avctx, AV_LOG_WARNING,
749  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
752  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
753  }
754 
755  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
756  s->c.codec_id == AV_CODEC_ID_H263P) &&
757  (avctx->width > 2048 ||
758  avctx->height > 1152 )) {
759  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
760  return AVERROR(EINVAL);
761  }
762  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
763  (avctx->width > 65535 ||
764  avctx->height > 65535 )) {
765  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
766  return AVERROR(EINVAL);
767  }
768  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
769  s->c.codec_id == AV_CODEC_ID_H263P ||
770  s->c.codec_id == AV_CODEC_ID_RV20) &&
771  ((avctx->width &3) ||
772  (avctx->height&3) )) {
773  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
774  return AVERROR(EINVAL);
775  }
776 
777  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
778  (avctx->width &15 ||
779  avctx->height&15 )) {
780  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
781  return AVERROR(EINVAL);
782  }
783 
784  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
785  s->c.codec_id == AV_CODEC_ID_WMV2) &&
786  avctx->width & 1) {
787  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
788  return AVERROR(EINVAL);
789  }
790 
792  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
793  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
794  return AVERROR(EINVAL);
795  }
796 
797  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
798  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
799  return AVERROR(EINVAL);
800  }
801 
802  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
803  avctx->mb_decision != FF_MB_DECISION_RD) {
804  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
805  return AVERROR(EINVAL);
806  }
807 
808  if (m->scenechange_threshold < 1000000000 &&
809  (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
810  av_log(avctx, AV_LOG_ERROR,
811  "closed gop with scene change detection are not supported yet, "
812  "set threshold to 1000000000\n");
813  return AVERROR_PATCHWELCOME;
814  }
815 
816  if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
817  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
819  av_log(avctx, AV_LOG_ERROR,
820  "low delay forcing is only available for mpeg2, "
821  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
822  return AVERROR(EINVAL);
823  }
824  if (m->max_b_frames != 0) {
825  av_log(avctx, AV_LOG_ERROR,
826  "B-frames cannot be used with low delay\n");
827  return AVERROR(EINVAL);
828  }
829  }
830 
831  if (avctx->slices > 1 &&
833  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
834  return AVERROR(EINVAL);
835  }
836 
837  if (m->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
838  av_log(avctx, AV_LOG_INFO,
839  "notice: b_frame_strategy only affects the first pass\n");
840  m->b_frame_strategy = 0;
841  }
842 
843  gcd = av_gcd(avctx->time_base.den, avctx->time_base.num);
844  if (gcd > 1) {
845  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
846  avctx->time_base.den /= gcd;
847  avctx->time_base.num /= gcd;
848  //return -1;
849  }
850 
851  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
852  // (a + x * 3 / 8) / x
853  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
854  s->inter_quant_bias = 0;
855  } else {
856  s->intra_quant_bias = 0;
857  // (a - x / 4) / x
858  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
859  }
860 
861  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
862  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
863  return AVERROR(EINVAL);
864  }
865 
866  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
867 
868  switch (avctx->codec->id) {
869 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
871  s->rtp_mode = 1;
872  /* fallthrough */
874  s->c.out_format = FMT_MPEG1;
875  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
876  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
878  break;
879 #endif
880 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
881  case AV_CODEC_ID_MJPEG:
882  case AV_CODEC_ID_AMV:
883  s->c.out_format = FMT_MJPEG;
884  m->intra_only = 1; /* force intra only for jpeg */
885  avctx->delay = 0;
886  s->c.low_delay = 1;
887  break;
888 #endif
889  case AV_CODEC_ID_SPEEDHQ:
890  s->c.out_format = FMT_SPEEDHQ;
891  m->intra_only = 1; /* force intra only for SHQ */
892  avctx->delay = 0;
893  s->c.low_delay = 1;
894  break;
895  case AV_CODEC_ID_H261:
896  s->c.out_format = FMT_H261;
897  avctx->delay = 0;
898  s->c.low_delay = 1;
899  s->rtp_mode = 0; /* Sliced encoding not supported */
900  break;
901  case AV_CODEC_ID_H263:
902  if (!CONFIG_H263_ENCODER)
905  s->c.width, s->c.height) == 8) {
906  av_log(avctx, AV_LOG_ERROR,
907  "The specified picture size of %dx%d is not valid for "
908  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
909  "352x288, 704x576, and 1408x1152. "
910  "Try H.263+.\n", s->c.width, s->c.height);
911  return AVERROR(EINVAL);
912  }
913  s->c.out_format = FMT_H263;
914  avctx->delay = 0;
915  s->c.low_delay = 1;
916  break;
917  case AV_CODEC_ID_H263P:
918  s->c.out_format = FMT_H263;
919  /* Fx */
920  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
921  s->modified_quant = s->c.h263_aic;
922  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
923  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
924  s->flipflop_rounding = 1;
925 
926  /* /Fx */
927  /* These are just to be sure */
928  avctx->delay = 0;
929  s->c.low_delay = 1;
930  break;
931  case AV_CODEC_ID_FLV1:
932  s->c.out_format = FMT_H263;
933  s->me.unrestricted_mv = 1;
934  s->rtp_mode = 0; /* don't allow GOB */
935  avctx->delay = 0;
936  s->c.low_delay = 1;
937  break;
938 #if CONFIG_RV10_ENCODER
939  case AV_CODEC_ID_RV10:
941  s->c.out_format = FMT_H263;
942  avctx->delay = 0;
943  s->c.low_delay = 1;
944  break;
945 #endif
946 #if CONFIG_RV20_ENCODER
947  case AV_CODEC_ID_RV20:
949  s->c.out_format = FMT_H263;
950  avctx->delay = 0;
951  s->c.low_delay = 1;
952  s->modified_quant = 1;
953  // Set here to force allocation of dc_val;
954  // will be set later on a per-frame basis.
955  s->c.h263_aic = 1;
956  s->loop_filter = 1;
957  s->me.unrestricted_mv = 0;
958  break;
959 #endif
960  case AV_CODEC_ID_MPEG4:
961  s->c.out_format = FMT_H263;
962  s->c.h263_pred = 1;
963  s->me.unrestricted_mv = 1;
964  s->flipflop_rounding = 1;
965  s->c.low_delay = m->max_b_frames ? 0 : 1;
966  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
967  break;
969  s->c.out_format = FMT_H263;
970  s->c.h263_pred = 1;
971  s->me.unrestricted_mv = 1;
972  s->c.msmpeg4_version = MSMP4_V2;
973  avctx->delay = 0;
974  s->c.low_delay = 1;
975  break;
977  s->c.out_format = FMT_H263;
978  s->c.h263_pred = 1;
979  s->me.unrestricted_mv = 1;
980  s->c.msmpeg4_version = MSMP4_V3;
981  s->flipflop_rounding = 1;
982  avctx->delay = 0;
983  s->c.low_delay = 1;
984  break;
985  case AV_CODEC_ID_WMV1:
986  s->c.out_format = FMT_H263;
987  s->c.h263_pred = 1;
988  s->me.unrestricted_mv = 1;
989  s->c.msmpeg4_version = MSMP4_WMV1;
990  s->flipflop_rounding = 1;
991  avctx->delay = 0;
992  s->c.low_delay = 1;
993  break;
994  case AV_CODEC_ID_WMV2:
995  s->c.out_format = FMT_H263;
996  s->c.h263_pred = 1;
997  s->me.unrestricted_mv = 1;
998  s->c.msmpeg4_version = MSMP4_WMV2;
999  s->flipflop_rounding = 1;
1000  avctx->delay = 0;
1001  s->c.low_delay = 1;
1002  break;
1003  default:
1004  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1005  }
1006 
1007  avctx->has_b_frames = !s->c.low_delay;
1008 
1009  s->c.encoding = 1;
1010 
1011  s->c.progressive_frame =
1012  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1014  s->c.alternate_scan);
1015 
1016  if (avctx->flags & AV_CODEC_FLAG_PSNR || avctx->mb_decision == FF_MB_DECISION_RD ||
1018  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1019  (1 << AV_PICTURE_TYPE_P) |
1020  (1 << AV_PICTURE_TYPE_B);
1021  } else if (!m->intra_only) {
1022  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1023  (1 << AV_PICTURE_TYPE_P);
1024  } else {
1025  s->frame_reconstruction_bitfield = 0;
1026  }
1027 
1028  if (m->lmin > m->lmax) {
1029  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1030  m->lmin = m->lmax;
1031  }
1032 
1033  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1034  * main slice to the slice contexts, so we initialize various fields of it
1035  * before calling ff_mpv_init_duplicate_contexts(). */
1036  s->parent = m;
1037  ff_mpv_idct_init(&s->c);
1038  init_unquantize(s, avctx);
1039  ff_fdctdsp_init(&s->fdsp, avctx);
1040  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1041  ff_pixblockdsp_init(&s->pdsp, 8);
1042  ret = me_cmp_init(m, avctx);
1043  if (ret < 0)
1044  return ret;
1045 
1046  if (!(avctx->stats_out = av_mallocz(256)) ||
1047  !(s->new_pic = av_frame_alloc()) ||
1048  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1049  return AVERROR(ENOMEM);
1050 
1051  ret = init_matrices(m, avctx);
1052  if (ret < 0)
1053  return ret;
1054 
1056 
1057  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1059 #if CONFIG_MSMPEG4ENC
1060  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1062 #endif
1063  }
1064 
1065  s->c.slice_ctx_size = sizeof(*s);
1066  ret = ff_mpv_common_init(&s->c);
1067  if (ret < 0)
1068  return ret;
1069  ret = init_buffers(m);
1070  if (ret < 0)
1071  return ret;
1072  if (s->c.slice_context_count > 1) {
1073  s->rtp_mode = 1;
1074  if (avctx->codec_id == AV_CODEC_ID_H263P)
1075  s->h263_slice_structured = 1;
1076  }
1078  if (ret < 0)
1079  return ret;
1080 
1081  ret = init_slice_buffers(m);
1082  if (ret < 0)
1083  return ret;
1084 
1086  if (ret < 0)
1087  return ret;
1088 
1089  if (m->b_frame_strategy == 2) {
1090  for (int i = 0; i < m->max_b_frames + 2; i++) {
1091  m->tmp_frames[i] = av_frame_alloc();
1092  if (!m->tmp_frames[i])
1093  return AVERROR(ENOMEM);
1094 
1096  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1097  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1098 
1099  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1100  if (ret < 0)
1101  return ret;
1102  }
1103  }
1104 
1105  cpb_props = ff_encode_add_cpb_side_data(avctx);
1106  if (!cpb_props)
1107  return AVERROR(ENOMEM);
1108  cpb_props->max_bitrate = avctx->rc_max_rate;
1109  cpb_props->min_bitrate = avctx->rc_min_rate;
1110  cpb_props->avg_bitrate = avctx->bit_rate;
1111  cpb_props->buffer_size = avctx->rc_buffer_size;
1112 
1113  return 0;
1114 }
1115 
1117 {
1118  MPVMainEncContext *const m = avctx->priv_data;
1119  MPVEncContext *const s = &m->s;
1120 
1122 
1123  ff_mpv_common_end(&s->c);
1124  av_refstruct_pool_uninit(&s->c.picture_pool);
1125 
1126  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1129  }
1130  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1131  av_frame_free(&m->tmp_frames[i]);
1132 
1133  av_frame_free(&s->new_pic);
1134 
1135  av_freep(&avctx->stats_out);
1136 
1137  av_freep(&m->mv_table_base);
1138  av_freep(&s->p_field_select_table[0]);
1140 
1141  av_freep(&s->mb_type);
1142  av_freep(&s->lambda_table);
1143 
1144  av_freep(&s->q_intra_matrix);
1145  av_freep(&s->q_intra_matrix16);
1146  av_freep(&s->dct_offset);
1147 
1148  return 0;
1149 }
1150 
/**
 * Dequantize an intra block and write its inverse DCT to dest[]
 * (overwriting the destination, not adding to it).
 *
 * @param s         slice encoder context
 * @param block     coefficient block to dequantize and transform
 * @param i         block index, passed through to the unquantizer
 * @param dest      destination plane pointer
 * @param line_size destination stride in bytes
 * @param qscale    quantizer scale used for dequantization
 */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1158 
1159 static inline void add_dequant_dct(MPVEncContext *const s,
1160  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1161 {
1162  if (s->c.block_last_index[i] >= 0) {
1163  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1164 
1165  s->c.idsp.idct_add(dest, line_size, block);
1166  }
1167 }
1168 
1169 /**
1170  * Performs dequantization and IDCT (if necessary)
1171  */
1172 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1173 {
1174  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1175  /* print DCT coefficients */
1176  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1177  for (int i = 0; i < 6; i++) {
1178  for (int j = 0; j < 64; j++) {
1179  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1180  block[i][s->c.idsp.idct_permutation[j]]);
1181  }
1182  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1183  }
1184  }
1185 
1186  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1187  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1188  int dct_linesize, dct_offset;
1189  const int linesize = s->c.cur_pic.linesize[0];
1190  const int uvlinesize = s->c.cur_pic.linesize[1];
1191  const int block_size = 8;
1192 
1193  dct_linesize = linesize << s->c.interlaced_dct;
1194  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1195 
1196  if (!s->c.mb_intra) {
1197  /* No MC, as that was already done otherwise */
1198  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1199  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1200  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1201  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1202 
1203  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1204  if (s->c.chroma_y_shift) {
1205  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1206  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1207  } else {
1208  dct_linesize >>= 1;
1209  dct_offset >>= 1;
1210  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1211  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1212  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1213  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1214  }
1215  }
1216  } else {
1217  /* dct only in intra block */
1218  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1219  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1220  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1221  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1222 
1223  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1224  if (s->c.chroma_y_shift) {
1225  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1226  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1227  } else {
1228  dct_offset >>= 1;
1229  dct_linesize >>= 1;
1230  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1231  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1232  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1233  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1234  }
1235  }
1236  }
1237  }
1238 }
1239 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left corner of the 16x16 block
 * @param ref    constant value each pixel is compared against
 * @param stride distance between successive rows of src
 * @return sum over all 256 pixels of |src[x,y] - ref|
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (int x = 0; x < 16; x++) {
            int diff = row[x] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1253 
1254 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1255  const uint8_t *ref, int stride)
1256 {
1257  int x, y, w, h;
1258  int acc = 0;
1259 
1260  w = s->c.width & ~15;
1261  h = s->c.height & ~15;
1262 
1263  for (y = 0; y < h; y += 16) {
1264  for (x = 0; x < w; x += 16) {
1265  int offset = x + y * stride;
1266  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1267  stride, 16);
1268  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1269  int sae = get_sae(src + offset, mean, stride);
1270 
1271  acc += sae + 500 < sad;
1272  }
1273  }
1274  return acc;
1275 }
1276 
1277 /**
1278  * Allocates new buffers for an AVFrame and copies the properties
1279  * from another AVFrame.
1280  */
1281 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1282 {
1283  AVCodecContext *avctx = s->c.avctx;
1284  int ret;
1285 
1286  f->width = avctx->width + 2 * EDGE_WIDTH;
1287  f->height = avctx->height + 2 * EDGE_WIDTH;
1288 
1289  ret = ff_encode_alloc_frame(avctx, f);
1290  if (ret < 0)
1291  return ret;
1292 
1293  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1294  if (ret < 0)
1295  return ret;
1296 
1297  for (int i = 0; f->data[i]; i++) {
1298  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1299  f->linesize[i] +
1300  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1301  f->data[i] += offset;
1302  }
1303  f->width = avctx->width;
1304  f->height = avctx->height;
1305 
1306  ret = av_frame_copy_props(f, props_frame);
1307  if (ret < 0)
1308  return ret;
1309 
1310  return 0;
1311 }
1312 
/**
 * Queues one input frame (or a flush signal) for encoding.
 *
 * Validates/derives the frame's pts, obtains an MPVPicture from the pool —
 * referencing the input directly when its layout allows, otherwise copying
 * it into an edge-padded buffer — and appends it to m->input_picture[] at
 * the position given by the encoder delay.
 *
 * @param pic_arg next input frame, or NULL to signal flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* pts must be strictly monotonic. */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* No pts supplied: continue from the previous one, or fall
             * back to the display picture number. */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* The input frame can only be used directly if its strides,
         * dimensions and alignment match what the encoder expects. */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* Copy the input into a freshly allocated, edge-padded frame. */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* NOTE(review): interlaced MPEG-2 appears to need deeper
                 * bottom padding when the height is far from a multiple
                 * of 32 — confirm against the decoder's expectations. */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    /* Single bulk copy: full rows except the last, which is
                     * copied only up to width w. */
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* Fill the bottom/right padding when dimensions are not
                 * already aligned. */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1452 
/**
 * Decides whether picture p is similar enough to the reference to be
 * skipped (coded as a "no change" frame).
 *
 * Compares every 8x8 block of all three planes with frame_skip_cmp_fn and
 * accumulates the per-block errors using the norm selected by
 * |frame_skip_exp| (0 = max, 1 = sum of abs, 2..4 = higher powers).
 *
 * @return 1 if the frame may be skipped, 0 otherwise
 */
static int skip_check(MPVMainEncContext *const m,
                      const MPVPicture *p, const MPVPicture *ref)
{
    MPVEncContext *const s = &m->s;
    int score = 0;
    int64_t score64 = 0;

    for (int plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        /* Luma covers 2x2 8x8 blocks per macroblock, chroma 1x1. */
        const int bw = plane ? 1 : 2;
        for (int y = 0; y < s->c.mb_height * bw; y++) {
            for (int x = 0; x < s->c.mb_width * bw; x++) {
                /* Non-shared pictures carry a 16-byte offset — presumably
                 * the in-place/edge offset of the copied input; verify. */
                int off = p->shared ? 0 : 16;
                const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);

                switch (FFABS(m->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    /* Norms 0 and 1 accumulate into the 32-bit score; unify into score64. */
    if (score)
        score64 = score;
    /* A negative exponent requests the corresponding root of the mean. */
    if (m->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
                      -1.0/m->frame_skip_exp);

    if (score64 < m->frame_skip_threshold)
        return 1;
    /* Also skip when the error is small relative to the current lambda. */
    if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1494 
1496 {
1497  int ret;
1498  int size = 0;
1499 
1501  if (ret < 0)
1502  return ret;
1503 
1504  do {
1506  if (ret >= 0) {
1507  size += pkt->size;
1509  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1510  return ret;
1511  } while (ret >= 0);
1512 
1513  return size;
1514 }
1515 
1517 {
1518  MPVEncContext *const s = &m->s;
1519  AVPacket *pkt;
1520  const int scale = m->brd_scale;
1521  int width = s->c.width >> scale;
1522  int height = s->c.height >> scale;
1523  int out_size, p_lambda, b_lambda, lambda2;
1524  int64_t best_rd = INT64_MAX;
1525  int best_b_count = -1;
1526  int ret = 0;
1527 
1528  av_assert0(scale >= 0 && scale <= 3);
1529 
1530  pkt = av_packet_alloc();
1531  if (!pkt)
1532  return AVERROR(ENOMEM);
1533 
1534  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1535  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1536  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1537  if (!b_lambda) // FIXME we should do this somewhere else
1538  b_lambda = p_lambda;
1539  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1541 
1542  for (int i = 0; i < m->max_b_frames + 2; i++) {
1543  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1544  s->c.next_pic.ptr;
1545 
1546  if (pre_input_ptr) {
1547  const uint8_t *data[4];
1548  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1549 
1550  if (!pre_input_ptr->shared && i) {
1551  data[0] += INPLACE_OFFSET;
1552  data[1] += INPLACE_OFFSET;
1553  data[2] += INPLACE_OFFSET;
1554  }
1555 
1556  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1557  m->tmp_frames[i]->linesize[0],
1558  data[0],
1559  pre_input_ptr->f->linesize[0],
1560  width, height);
1561  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1562  m->tmp_frames[i]->linesize[1],
1563  data[1],
1564  pre_input_ptr->f->linesize[1],
1565  width >> 1, height >> 1);
1566  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1567  m->tmp_frames[i]->linesize[2],
1568  data[2],
1569  pre_input_ptr->f->linesize[2],
1570  width >> 1, height >> 1);
1571  }
1572  }
1573 
1574  for (int j = 0; j < m->max_b_frames + 1; j++) {
1575  AVCodecContext *c;
1576  int64_t rd = 0;
1577 
1578  if (!m->input_picture[j])
1579  break;
1580 
1582  if (!c) {
1583  ret = AVERROR(ENOMEM);
1584  goto fail;
1585  }
1586 
1587  c->width = width;
1588  c->height = height;
1590  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1591  c->mb_decision = s->c.avctx->mb_decision;
1592  c->me_cmp = s->c.avctx->me_cmp;
1593  c->mb_cmp = s->c.avctx->mb_cmp;
1594  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1595  c->pix_fmt = AV_PIX_FMT_YUV420P;
1596  c->time_base = s->c.avctx->time_base;
1597  c->max_b_frames = m->max_b_frames;
1598 
1599  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1600  if (ret < 0)
1601  goto fail;
1602 
1603 
1605  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1606 
1607  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1608  if (out_size < 0) {
1609  ret = out_size;
1610  goto fail;
1611  }
1612 
1613  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1614 
1615  for (int i = 0; i < m->max_b_frames + 1; i++) {
1616  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1617 
1618  m->tmp_frames[i + 1]->pict_type = is_p ?
1620  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1621 
1622  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1623  if (out_size < 0) {
1624  ret = out_size;
1625  goto fail;
1626  }
1627 
1628  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1629  }
1630 
1631  /* get the delayed frames */
1633  if (out_size < 0) {
1634  ret = out_size;
1635  goto fail;
1636  }
1637  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1638 
1639  rd += c->error[0] + c->error[1] + c->error[2];
1640 
1641  if (rd < best_rd) {
1642  best_rd = rd;
1643  best_b_count = j;
1644  }
1645 
1646 fail:
1649  if (ret < 0) {
1650  best_b_count = ret;
1651  break;
1652  }
1653  }
1654 
1655  av_packet_free(&pkt);
1656 
1657  return best_b_count;
1658 }
1659 
1660 /**
1661  * Determines whether an input picture is discarded or not
1662  * and if not determines the length of the next chain of B frames
1663  * and moves these pictures (including the P frame) into
1664  * reordered_input_picture.
1665  * input_picture[0] is always NULL when exiting this function, even on error;
1666  * reordered_input_picture[0] is always NULL when exiting this function on error.
1667  */
1669 {
1670  MPVEncContext *const s = &m->s;
1671 
1672  /* Either nothing to do or can't do anything */
1673  if (m->reordered_input_picture[0] || !m->input_picture[0])
1674  return 0;
1675 
1676  /* set next picture type & ordering */
1677  if (m->frame_skip_threshold || m->frame_skip_factor) {
1678  if (m->picture_in_gop_number < m->gop_size &&
1679  s->c.next_pic.ptr &&
1680  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1681  // FIXME check that the gop check above is +-1 correct
1683 
1684  ff_vbv_update(m, 0);
1685 
1686  return 0;
1687  }
1688  }
1689 
1690  if (/* m->picture_in_gop_number >= m->gop_size || */
1691  !s->c.next_pic.ptr || m->intra_only) {
1692  m->reordered_input_picture[0] = m->input_picture[0];
1693  m->input_picture[0] = NULL;
1696  m->coded_picture_number++;
1697  } else {
1698  int b_frames = 0;
1699 
1700  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1701  for (int i = 0; i < m->max_b_frames + 1; i++) {
1702  int pict_num = m->input_picture[0]->display_picture_number + i;
1703 
1704  if (pict_num >= m->rc_context.num_entries)
1705  break;
1706  if (!m->input_picture[i]) {
1707  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1708  break;
1709  }
1710 
1711  m->input_picture[i]->f->pict_type =
1712  m->rc_context.entry[pict_num].new_pict_type;
1713  }
1714  }
1715 
1716  if (m->b_frame_strategy == 0) {
1717  b_frames = m->max_b_frames;
1718  while (b_frames && !m->input_picture[b_frames])
1719  b_frames--;
1720  } else if (m->b_frame_strategy == 1) {
1721  for (int i = 1; i < m->max_b_frames + 1; i++) {
1722  if (m->input_picture[i] &&
1723  m->input_picture[i]->b_frame_score == 0) {
1726  m->input_picture[i ]->f->data[0],
1727  m->input_picture[i - 1]->f->data[0],
1728  s->c.linesize) + 1;
1729  }
1730  }
1731  for (int i = 0;; i++) {
1732  if (i >= m->max_b_frames + 1 ||
1733  !m->input_picture[i] ||
1734  m->input_picture[i]->b_frame_score - 1 >
1735  s->c.mb_num / m->b_sensitivity) {
1736  b_frames = FFMAX(0, i - 1);
1737  break;
1738  }
1739  }
1740 
1741  /* reset scores */
1742  for (int i = 0; i < b_frames + 1; i++)
1743  m->input_picture[i]->b_frame_score = 0;
1744  } else if (m->b_frame_strategy == 2) {
1745  b_frames = estimate_best_b_count(m);
1746  if (b_frames < 0) {
1748  return b_frames;
1749  }
1750  }
1751 
1752  for (int i = b_frames - 1; i >= 0; i--) {
1753  int type = m->input_picture[i]->f->pict_type;
1754  if (type && type != AV_PICTURE_TYPE_B)
1755  b_frames = i;
1756  }
1757  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1758  b_frames == m->max_b_frames) {
1759  av_log(s->c.avctx, AV_LOG_ERROR,
1760  "warning, too many B-frames in a row\n");
1761  }
1762 
1763  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1764  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1765  m->gop_size > m->picture_in_gop_number) {
1766  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1767  } else {
1768  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1769  b_frames = 0;
1770  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1771  }
1772  }
1773 
1774  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1775  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1776  b_frames--;
1777 
1778  m->reordered_input_picture[0] = m->input_picture[b_frames];
1779  m->input_picture[b_frames] = NULL;
1783  m->coded_picture_number++;
1784  for (int i = 0; i < b_frames; i++) {
1785  m->reordered_input_picture[i + 1] = m->input_picture[i];
1786  m->input_picture[i] = NULL;
1787  m->reordered_input_picture[i + 1]->f->pict_type =
1790  m->coded_picture_number++;
1791  }
1792  }
1793 
1794  return 0;
1795 }
1796 
1798 {
1799  MPVEncContext *const s = &m->s;
1800  int ret;
1801 
1803 
1804  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1807 
1809  av_assert1(!m->input_picture[0]);
1810  if (ret < 0)
1811  return ret;
1812 
1813  av_frame_unref(s->new_pic);
1814 
1815  if (m->reordered_input_picture[0]) {
1818 
1819  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1820  // input is a shared pix, so we can't modify it -> allocate a new
1821  // one & ensure that the shared one is reusable
1822  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1823 
1824  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1825  if (ret < 0)
1826  goto fail;
1827  } else {
1828  // input is not a shared pix -> reuse buffer for current_pix
1829  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1830  if (ret < 0)
1831  goto fail;
1832  for (int i = 0; i < MPV_MAX_PLANES; i++)
1833  s->new_pic->data[i] += INPLACE_OFFSET;
1834  }
1835  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1836  m->reordered_input_picture[0] = NULL;
1837  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1838  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1839  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1840  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1841  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1842  if (ret < 0) {
1843  ff_mpv_unref_picture(&s->c.cur_pic);
1844  return ret;
1845  }
1846  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1847 
1848  }
1849  return 0;
1850 fail:
1852  return ret;
1853 }
1854 
/**
 * Per-frame post-encoding bookkeeping: pad the reconstructed reference
 * picture's borders for unrestricted motion vectors and remember the
 * picture type and lambda for rate control of subsequent frames.
 */
static void frame_end(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    /* Replicate edge pixels of the reconstructed reference so motion
     * vectors may point outside the picture. */
    if (s->me.unrestricted_mv &&
        s->c.cur_pic.reference &&
        !m->intra_only) {
        int hshift = s->c.chroma_x_shift;
        int vshift = s->c.chroma_y_shift;
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
                                s->c.cur_pic.linesize[0],
                                s->c.h_edge_pos, s->c.v_edge_pos,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
                                s->c.cur_pic.linesize[1],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
                                s->c.cur_pic.linesize[2],
                                s->c.h_edge_pos >> hshift,
                                s->c.v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    /* Remembered for rate control / picture-type decisions of later frames. */
    m->last_pict_type = s->c.pict_type;
    m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
    if (s->c.pict_type != AV_PICTURE_TYPE_B)
        m->last_non_b_pict_type = s->c.pict_type;
}
1890 
1892 {
1893  MPVEncContext *const s = &m->s;
1894  int intra, i;
1895 
1896  for (intra = 0; intra < 2; intra++) {
1897  if (s->dct_count[intra] > (1 << 16)) {
1898  for (i = 0; i < 64; i++) {
1899  s->dct_error_sum[intra][i] >>= 1;
1900  }
1901  s->dct_count[intra] >>= 1;
1902  }
1903 
1904  for (i = 0; i < 64; i++) {
1905  s->dct_offset[intra][i] = (m->noise_reduction *
1906  s->dct_count[intra] +
1907  s->dct_error_sum[intra][i] / 2) /
1908  (s->dct_error_sum[intra][i] + 1);
1909  }
1910  }
1911 }
1912 
/**
 * Per-frame pre-encoding setup: propagates the decided picture type to the
 * frame and rotates the reference pictures for non-B frames.
 */
static void frame_start(MPVMainEncContext *const m)
{
    MPVEncContext *const s = &m->s;

    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    /* B-frames do not become references, so only rotate for I/P frames:
     * the previous "next" becomes "last", the current picture "next". */
    if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
        ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
    }

    av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
    /* When noise reduction is active, its per-coefficient offsets are
     * refreshed here from the accumulated statistics. */
    if (s->dct_error_sum) {
    }
}
1929 
1931  const AVFrame *pic_arg, int *got_packet)
1932 {
1933  MPVMainEncContext *const m = avctx->priv_data;
1934  MPVEncContext *const s = &m->s;
1935  int stuffing_count, ret;
1936  int context_count = s->c.slice_context_count;
1937 
1938  ff_mpv_unref_picture(&s->c.cur_pic);
1939 
1940  m->vbv_ignore_qmax = 0;
1941 
1942  m->picture_in_gop_number++;
1943 
1944  ret = load_input_picture(m, pic_arg);
1945  if (ret < 0)
1946  return ret;
1947 
1949  if (ret < 0)
1950  return ret;
1951 
1952  /* output? */
1953  if (s->new_pic->data[0]) {
1954  int growing_buffer = context_count == 1 && !s->data_partitioning;
1955  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1956  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1957  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1958  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1959  if (ret < 0)
1960  return ret;
1961  }
1962  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1963  return ret;
1965  if (s->mb_info) {
1966  s->mb_info_ptr = av_packet_new_side_data(pkt,
1968  s->c.mb_width*s->c.mb_height*12);
1969  if (!s->mb_info_ptr)
1970  return AVERROR(ENOMEM);
1971  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1972  }
1973 
1974  s->c.pict_type = s->new_pic->pict_type;
1975  frame_start(m);
1976 vbv_retry:
1977  ret = encode_picture(m, pkt);
1978  if (growing_buffer) {
1979  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1980  pkt->data = s->pb.buf;
1981  pkt->size = avctx->internal->byte_buffer_size;
1982  }
1983  if (ret < 0)
1984  return -1;
1985 
1986  frame_end(m);
1987 
1988  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1990 
1991  if (avctx->rc_buffer_size) {
1992  RateControlContext *rcc = &m->rc_context;
1993  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1994  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1995  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1996 
1997  if (put_bits_count(&s->pb) > max_size &&
1998  s->lambda < m->lmax) {
1999  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2000  (s->c.qscale + 1) / s->c.qscale);
2001  if (s->adaptive_quant) {
2002  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2003  s->lambda_table[i] =
2004  FFMAX(s->lambda_table[i] + min_step,
2005  s->lambda_table[i] * (s->c.qscale + 1) /
2006  s->c.qscale);
2007  }
2008  s->c.mb_skipped = 0; // done in frame_start()
2009  // done in encode_picture() so we must undo it
2010  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2011  s->c.no_rounding ^= s->flipflop_rounding;
2012  }
2013  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2014  s->c.time_base = s->c.last_time_base;
2015  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2016  }
2017  m->vbv_ignore_qmax = 1;
2018  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2019  goto vbv_retry;
2020  }
2021 
2022  av_assert0(avctx->rc_max_rate);
2023  }
2024 
2025  if (avctx->flags & AV_CODEC_FLAG_PASS1)
2027 
2028  for (int i = 0; i < MPV_MAX_PLANES; i++)
2029  avctx->error[i] += s->encoding_error[i];
2030  ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
2031  s->encoding_error,
2032  (avctx->flags&AV_CODEC_FLAG_PSNR) ? MPV_MAX_PLANES : 0,
2033  s->c.pict_type);
2034 
2035  if (avctx->flags & AV_CODEC_FLAG_PASS1)
2036  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2037  s->misc_bits + s->i_tex_bits +
2038  s->p_tex_bits);
2039  flush_put_bits(&s->pb);
2040  m->frame_bits = put_bits_count(&s->pb);
2041 
2042  stuffing_count = ff_vbv_update(m, m->frame_bits);
2043  m->stuffing_bits = 8*stuffing_count;
2044  if (stuffing_count) {
2045  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2046  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2047  return -1;
2048  }
2049 
2050  switch (s->c.codec_id) {
2053  while (stuffing_count--) {
2054  put_bits(&s->pb, 8, 0);
2055  }
2056  break;
2057  case AV_CODEC_ID_MPEG4:
2058  put_bits(&s->pb, 16, 0);
2059  put_bits(&s->pb, 16, 0x1C3);
2060  stuffing_count -= 4;
2061  while (stuffing_count--) {
2062  put_bits(&s->pb, 8, 0xFF);
2063  }
2064  break;
2065  default:
2066  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2067  m->stuffing_bits = 0;
2068  }
2069  flush_put_bits(&s->pb);
2070  m->frame_bits = put_bits_count(&s->pb);
2071  }
2072 
2073  /* update MPEG-1/2 vbv_delay for CBR */
2074  if (avctx->rc_max_rate &&
2075  avctx->rc_min_rate == avctx->rc_max_rate &&
2076  s->c.out_format == FMT_MPEG1 &&
2077  90000LL * (avctx->rc_buffer_size - 1) <=
2078  avctx->rc_max_rate * 0xFFFFLL) {
2079  AVCPBProperties *props;
2080  size_t props_size;
2081 
2082  int vbv_delay, min_delay;
2083  double inbits = avctx->rc_max_rate *
2084  av_q2d(avctx->time_base);
2085  int minbits = m->frame_bits - 8 *
2086  (m->vbv_delay_pos - 1);
2087  double bits = m->rc_context.buffer_index + minbits - inbits;
2088  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2089 
2090  if (bits < 0)
2091  av_log(avctx, AV_LOG_ERROR,
2092  "Internal error, negative bits\n");
2093 
2094  av_assert1(s->c.repeat_first_field == 0);
2095 
2096  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2097  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2098  avctx->rc_max_rate;
2099 
2100  vbv_delay = FFMAX(vbv_delay, min_delay);
2101 
2102  av_assert0(vbv_delay < 0xFFFF);
2103 
2104  vbv_delay_ptr[0] &= 0xF8;
2105  vbv_delay_ptr[0] |= vbv_delay >> 13;
2106  vbv_delay_ptr[1] = vbv_delay >> 5;
2107  vbv_delay_ptr[2] &= 0x07;
2108  vbv_delay_ptr[2] |= vbv_delay << 3;
2109 
2110  props = av_cpb_properties_alloc(&props_size);
2111  if (!props)
2112  return AVERROR(ENOMEM);
2113  props->vbv_delay = vbv_delay * 300;
2114 
2116  (uint8_t*)props, props_size);
2117  if (ret < 0) {
2118  av_freep(&props);
2119  return ret;
2120  }
2121  }
2122  m->total_bits += m->frame_bits;
2123 
2124  pkt->pts = s->c.cur_pic.ptr->f->pts;
2125  pkt->duration = s->c.cur_pic.ptr->f->duration;
2126  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2127  if (!s->c.cur_pic.ptr->coded_picture_number)
2128  pkt->dts = pkt->pts - m->dts_delta;
2129  else
2130  pkt->dts = m->reordered_pts;
2131  m->reordered_pts = pkt->pts;
2132  } else
2133  pkt->dts = pkt->pts;
2134 
2135  // the no-delay case is handled in generic code
2136  if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY) {
2137  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2138  if (ret < 0)
2139  return ret;
2140  }
2141 
2142  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2144  if (s->mb_info)
2146  } else {
2147  m->frame_bits = 0;
2148  }
2149 
2150  ff_mpv_unref_picture(&s->c.cur_pic);
2151 
2152  av_assert1((m->frame_bits & 7) == 0);
2153 
2154  pkt->size = m->frame_bits / 8;
2155  *got_packet = !!pkt->size;
2156  return 0;
2157 }
2158 
2160  int n, int threshold)
2161 {
 /* Single-coefficient elimination: if block n contains only coefficients of
  * magnitude 1 and their run-length-based score stays below `threshold`,
  * the whole block is zeroed, since coding it would cost more bits than the
  * quality it adds.  A negative threshold means the DC coefficient may be
  * eliminated as well (skip_dc = 0). */
 /* Score contribution per length of the zero-run preceding a +-1 level:
  * early coefficients weigh more than late ones. */
2162  static const char tab[64] = {
2163  3, 2, 2, 1, 1, 1, 1, 1,
2164  1, 1, 1, 1, 1, 1, 1, 1,
2165  1, 1, 1, 1, 1, 1, 1, 1,
2166  0, 0, 0, 0, 0, 0, 0, 0,
2167  0, 0, 0, 0, 0, 0, 0, 0,
2168  0, 0, 0, 0, 0, 0, 0, 0,
2169  0, 0, 0, 0, 0, 0, 0, 0,
2170  0, 0, 0, 0, 0, 0, 0, 0
2171  };
2172  int score = 0;
2173  int run = 0;
2174  int i;
2175  int16_t *block = s->block[n];
2176  const int last_index = s->c.block_last_index[n];
2177  int skip_dc;
2178 
2179  if (threshold < 0) {
2180  skip_dc = 0;
2181  threshold = -threshold;
2182  } else
2183  skip_dc = 1;
2184 
2185  /* Are all we could set to zero already zero? */
2186  if (last_index <= skip_dc - 1)
2187  return;
2188 
 /* Accumulate the score; bail out as soon as any level > 1 is found,
  * since such a block is never eliminated. */
2189  for (i = 0; i <= last_index; i++) {
2190  const int j = s->c.intra_scantable.permutated[i];
2191  const int level = FFABS(block[j]);
2192  if (level == 1) {
2193  if (skip_dc && i == 0)
2194  continue;
2195  score += tab[run];
2196  run = 0;
2197  } else if (level > 1) {
2198  return;
2199  } else {
2200  run++;
2201  }
2202  }
2203  if (score >= threshold)
2204  return;
 /* Below threshold: clear all eliminable coefficients and fix up
  * block_last_index (0 if a DC coefficient survives, -1 otherwise). */
2205  for (i = skip_dc; i <= last_index; i++) {
2206  const int j = s->c.intra_scantable.permutated[i];
2207  block[j] = 0;
2208  }
2209  if (block[0])
2210  s->c.block_last_index[n] = 0;
2211  else
2212  s->c.block_last_index[n] = -1;
2213 }
2214 
2215 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2216  int last_index)
2217 {
2218  int i;
2219  const int maxlevel = s->max_qcoeff;
2220  const int minlevel = s->min_qcoeff;
2221  int overflow = 0;
2222 
2223  if (s->c.mb_intra) {
2224  i = 1; // skip clipping of intra dc
2225  } else
2226  i = 0;
2227 
2228  for (; i <= last_index; i++) {
2229  const int j = s->c.intra_scantable.permutated[i];
2230  int level = block[j];
2231 
2232  if (level > maxlevel) {
2233  level = maxlevel;
2234  overflow++;
2235  } else if (level < minlevel) {
2236  level = minlevel;
2237  overflow++;
2238  }
2239 
2240  block[j] = level;
2241  }
2242 
2243  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2244  av_log(s->c.avctx, AV_LOG_INFO,
2245  "warning, clipping %d dct coefficients to %d..%d\n",
2246  overflow, minlevel, maxlevel);
2247 }
2248 
/**
 * Fill weight[] (8x8, row-major) with a per-pixel activity measure:
 * for every pixel, the scaled standard deviation of its 3x3 neighbourhood
 * (clipped to the block).  Flat areas yield small weights, textured areas
 * larger ones.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int pix = ptr[xx + yy * stride];
                    sum += pix;
                    sqr += pix * pix;
                    count++;
                }
            }
            /* count*sqr - sum*sum == count^2 * variance (non-negative). */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2272 
2274  int motion_x, int motion_y,
2275  int mb_block_height,
2276  int mb_block_width,
2277  int mb_block_count,
2278  int chroma_x_shift,
2279  int chroma_y_shift,
2280  int chroma_format)
2281 {
 /* Encode one macroblock: optionally adapt the quantizer, fetch (and, at
  * picture borders, edge-emulate) the source pixels, build the residual or
  * intra blocks, decide interlaced vs. progressive DCT, quantize, apply
  * coefficient elimination, and finally emit the bitstream through
  * s->encode_mb().
  * NOTE(review): several original lines are elided in this rendering
  * (e.g. the shift amount terminating the lambda2 computation). */
2282 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2283  * and neither of these encoders currently supports 444. */
2284 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2285  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2286  DECLARE_ALIGNED(16, int16_t, weight)[12][64];
2287  int16_t orig[12][64];
2288  const int mb_x = s->c.mb_x;
2289  const int mb_y = s->c.mb_y;
2290  int i;
2291  int skip_dct[12];
2292  int dct_offset = s->c.linesize * 8; // default for progressive frames
2293  int uv_dct_offset = s->c.uvlinesize * 8;
2294  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2295  ptrdiff_t wrap_y, wrap_c;
2296 
2297  for (i = 0; i < mb_block_count; i++)
2298  skip_dct[i] = s->skipdct;
2299 
 /* Per-macroblock quantizer adaptation: pick lambda from the table and
  * derive dquant, with codec-specific restrictions on its value. */
2300  if (s->adaptive_quant) {
2301  const int last_qp = s->c.qscale;
2302  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2303 
2304  s->lambda = s->lambda_table[mb_xy];
2305  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2307 
2308  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2309  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2310 
2311  if (s->c.out_format == FMT_H263) {
2312  s->dquant = av_clip(s->dquant, -2, 2);
2313 
2314  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2315  if (!s->c.mb_intra) {
2316  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2317  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2318  s->dquant = 0;
2319  }
2320  if (s->c.mv_type == MV_TYPE_8X8)
2321  s->dquant = 0;
2322  }
2323  }
2324  }
2325  }
2326  ff_set_qscale(&s->c, last_qp + s->dquant);
2327  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2328  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2329 
2330  wrap_y = s->c.linesize;
2331  wrap_c = s->c.uvlinesize;
2332  ptr_y = s->new_pic->data[0] +
2333  (mb_y * 16 * wrap_y) + mb_x * 16;
2334  ptr_cb = s->new_pic->data[1] +
2335  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2336  ptr_cr = s->new_pic->data[2] +
2337  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2338 
 /* Macroblock sticks out of the picture: replicate edge pixels into the
  * scratch buffer and read the sources from there instead. */
2339  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2340  s->c.codec_id != AV_CODEC_ID_AMV) {
2341  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2342  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2343  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2344  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2345  wrap_y, wrap_y,
2346  16, 16, mb_x * 16, mb_y * 16,
2347  s->c.width, s->c.height);
2348  ptr_y = ebuf;
2349  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2350  wrap_c, wrap_c,
2351  mb_block_width, mb_block_height,
2352  mb_x * mb_block_width, mb_y * mb_block_height,
2353  cw, ch);
2354  ptr_cb = ebuf + 16 * wrap_y;
2355  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2356  wrap_c, wrap_c,
2357  mb_block_width, mb_block_height,
2358  mb_x * mb_block_width, mb_y * mb_block_height,
2359  cw, ch);
2360  ptr_cr = ebuf + 16 * wrap_y + 16;
2361  }
2362 
 /* Intra path: choose progressive/interlaced DCT by comparing ildct_cmp
  * scores, then read the source pixels directly into the blocks. */
2363  if (s->c.mb_intra) {
2364  if (INTERLACED_DCT(s)) {
2365  int progressive_score, interlaced_score;
2366 
2367  s->c.interlaced_dct = 0;
2368  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2369  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2370  NULL, wrap_y, 8) - 400;
2371 
2372  if (progressive_score > 0) {
2373  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2374  NULL, wrap_y * 2, 8) +
2375  s->ildct_cmp[1](s, ptr_y + wrap_y,
2376  NULL, wrap_y * 2, 8);
2377  if (progressive_score > interlaced_score) {
2378  s->c.interlaced_dct = 1;
2379 
2380  dct_offset = wrap_y;
2381  uv_dct_offset = wrap_c;
2382  wrap_y <<= 1;
2383  if (chroma_format == CHROMA_422 ||
2384  chroma_format == CHROMA_444)
2385  wrap_c <<= 1;
2386  }
2387  }
2388  }
2389 
2390  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2391  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2392  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2393  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2394 
2395  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2396  skip_dct[4] = 1;
2397  skip_dct[5] = 1;
2398  } else {
2399  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2400  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2401  if (chroma_format == CHROMA_422) {
2402  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2403  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2404  } else if (chroma_format == CHROMA_444) {
2405  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2406  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2407  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2408  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2409  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2410  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2411  }
2412  }
2413  } else {
 /* Inter path: motion-compensate the prediction into s->c.dest, then
  * form the residual blocks as source minus prediction. */
2414  op_pixels_func (*op_pix)[4];
2415  qpel_mc_func (*op_qpix)[16];
2416  uint8_t *dest_y, *dest_cb, *dest_cr;
2417 
2418  dest_y = s->c.dest[0];
2419  dest_cb = s->c.dest[1];
2420  dest_cr = s->c.dest[2];
2421 
2422  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2423  op_pix = s->c.hdsp.put_pixels_tab;
2424  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2425  } else {
2426  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2427  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2428  }
2429 
2430  if (s->c.mv_dir & MV_DIR_FORWARD) {
2431  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2432  s->c.last_pic.data,
2433  op_pix, op_qpix);
 /* If a backward MC follows, it must average on top of the forward
  * prediction (bidirectional). */
2434  op_pix = s->c.hdsp.avg_pixels_tab;
2435  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2436  }
2437  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2438  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2439  s->c.next_pic.data,
2440  op_pix, op_qpix);
2441  }
2442 
2443  if (INTERLACED_DCT(s)) {
2444  int progressive_score, interlaced_score;
2445 
2446  s->c.interlaced_dct = 0;
2447  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2448  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2449  ptr_y + wrap_y * 8,
2450  wrap_y, 8) - 400;
2451 
2452  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2453  progressive_score -= 400;
2454 
2455  if (progressive_score > 0) {
2456  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2457  wrap_y * 2, 8) +
2458  s->ildct_cmp[0](s, dest_y + wrap_y,
2459  ptr_y + wrap_y,
2460  wrap_y * 2, 8);
2461 
2462  if (progressive_score > interlaced_score) {
2463  s->c.interlaced_dct = 1;
2464 
2465  dct_offset = wrap_y;
2466  uv_dct_offset = wrap_c;
2467  wrap_y <<= 1;
2468  if (chroma_format == CHROMA_422)
2469  wrap_c <<= 1;
2470  }
2471  }
2472  }
2473 
2474  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2475  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2476  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2477  dest_y + dct_offset, wrap_y);
2478  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2479  dest_y + dct_offset + 8, wrap_y);
2480 
2481  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2482  skip_dct[4] = 1;
2483  skip_dct[5] = 1;
2484  } else {
2485  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2486  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2487  if (!chroma_y_shift) { /* 422 */
2488  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2489  dest_cb + uv_dct_offset, wrap_c);
2490  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2491  dest_cr + uv_dct_offset, wrap_c);
2492  }
2493  }
2494  /* pre quantization */
 /* Low-motion-variance macroblock: skip the DCT for blocks whose SAD
  * against the prediction is small relative to the quantizer. */
2495  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2496  // FIXME optimize
2497  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2498  skip_dct[0] = 1;
2499  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2500  skip_dct[1] = 1;
2501  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2502  wrap_y, 8) < 20 * s->c.qscale)
2503  skip_dct[2] = 1;
2504  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2505  wrap_y, 8) < 20 * s->c.qscale)
2506  skip_dct[3] = 1;
2507  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2508  skip_dct[4] = 1;
2509  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2510  skip_dct[5] = 1;
2511  if (!chroma_y_shift) { /* 422 */
2512  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2513  dest_cb + uv_dct_offset,
2514  wrap_c, 8) < 20 * s->c.qscale)
2515  skip_dct[6] = 1;
2516  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2517  dest_cr + uv_dct_offset,
2518  wrap_c, 8) < 20 * s->c.qscale)
2519  skip_dct[7] = 1;
2520  }
2521  }
2522  }
2523 
 /* Noise shaping: keep per-pixel visual weights and a copy of the
  * unquantized blocks so dct_quantize_refine() can rework them later. */
2524  if (s->quantizer_noise_shaping) {
2525  if (!skip_dct[0])
2526  get_visual_weight(weight[0], ptr_y , wrap_y);
2527  if (!skip_dct[1])
2528  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2529  if (!skip_dct[2])
2530  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2531  if (!skip_dct[3])
2532  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2533  if (!skip_dct[4])
2534  get_visual_weight(weight[4], ptr_cb , wrap_c);
2535  if (!skip_dct[5])
2536  get_visual_weight(weight[5], ptr_cr , wrap_c);
2537  if (!chroma_y_shift) { /* 422 */
2538  if (!skip_dct[6])
2539  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2540  wrap_c);
2541  if (!skip_dct[7])
2542  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2543  wrap_c);
2544  }
2545  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2546  }
2547 
2548  /* DCT & quantize */
2549  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2550  {
2551  for (i = 0; i < mb_block_count; i++) {
2552  if (!skip_dct[i]) {
2553  int overflow;
2554  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2555  // FIXME we could decide to change to quantizer instead of
2556  // clipping
2557  // JS: I don't think that would be a good idea it could lower
2558  // quality instead of improve it. Just INTRADC clipping
2559  // deserves changes in quantizer
2560  if (overflow)
2561  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2562  } else
2563  s->c.block_last_index[i] = -1;
2564  }
2565  if (s->quantizer_noise_shaping) {
2566  for (i = 0; i < mb_block_count; i++) {
2567  if (!skip_dct[i]) {
2568  s->c.block_last_index[i] =
2569  dct_quantize_refine(s, s->block[i], weight[i],
2570  orig[i], i, s->c.qscale);
2571  }
2572  }
2573  }
2574 
2575  if (s->luma_elim_threshold && !s->c.mb_intra)
2576  for (i = 0; i < 4; i++)
2577  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2578  if (s->chroma_elim_threshold && !s->c.mb_intra)
2579  for (i = 4; i < mb_block_count; i++)
2580  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2581 
2582  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2583  for (i = 0; i < mb_block_count; i++) {
2584  if (s->c.block_last_index[i] == -1)
2585  s->coded_score[i] = INT_MAX / 256;
2586  }
2587  }
2588  }
2589 
 /* Grayscale-only intra coding: force mid-gray chroma DC blocks. */
2590  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2591  s->c.block_last_index[4] =
2592  s->c.block_last_index[5] = 0;
2593  s->block[4][0] =
2594  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2595  if (!chroma_y_shift) { /* 422 / 444 */
2596  for (i=6; i<12; i++) {
2597  s->c.block_last_index[i] = 0;
2598  s->block[i][0] = s->block[4][0];
2599  }
2600  }
2601  }
2602 
2603  // non c quantize code returns incorrect block_last_index FIXME
2604  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2605  for (i = 0; i < mb_block_count; i++) {
2606  int j;
2607  if (s->c.block_last_index[i] > 0) {
2608  for (j = 63; j > 0; j--) {
2609  if (s->block[i][s->c.intra_scantable.permutated[j]])
2610  break;
2611  }
2612  s->c.block_last_index[i] = j;
2613  }
2614  }
2615  }
2616 
2617  s->encode_mb(s, s->block, motion_x, motion_y);
2618 }
2619 
2620 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2621 {
2622  if (s->c.chroma_format == CHROMA_420)
2623  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2624  else if (s->c.chroma_format == CHROMA_422)
2625  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2626  else
2627  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2628 }
2629 
2630 typedef struct MBBackup {
 /* Snapshot of the per-macroblock encoder state that trying out one
  * candidate coding mode mutates; encode_mb_hq() saves it before a trial
  * and restores/keeps it afterwards.
  * NOTE(review): several members are elided in this rendering of the
  * file (the original line numbers in the prefix are non-contiguous). */
2631  struct {
2632  int mv[2][4][2];
2633  int last_mv[2][2][2];
2636  int qscale;
2639  } c;
2641  int last_dc[3];
2643  int dquant;
2645  int16_t (*block)[64];
2647 } MBBackup;
2648 
 /* Generates a pair of helpers to copy the macroblock-level encoder state
  * between an MPVEncContext and an MBBackup snapshot:
  *   BEFORE##_context_before_encode(): state needed before trying a mode
  *   AFTER##_context_after_encode():   state produced by encoding a mode
  * Instantiated twice below with swapped src/dst types so the same field
  * list yields both the save and the restore direction. */
2649 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2650 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2651  const SRC_TYPE *const s) \
2652 { \
2653  /* FIXME is memcpy faster than a loop? */ \
2654  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2655  \
2656  /* MPEG-1 */ \
2657  d->mb_skip_run = s->mb_skip_run; \
2658  for (int i = 0; i < 3; i++) \
2659  d->last_dc[i] = s->last_dc[i]; \
2660  \
2661  /* statistics */ \
2662  d->mv_bits = s->mv_bits; \
2663  d->i_tex_bits = s->i_tex_bits; \
2664  d->p_tex_bits = s->p_tex_bits; \
2665  d->i_count = s->i_count; \
2666  d->misc_bits = s->misc_bits; \
2667  d->last_bits = 0; \
2668  \
2669  d->c.mb_skipped = 0; \
2670  d->c.qscale = s->c.qscale; \
2671  d->dquant = s->dquant; \
2672  \
2673  d->esc3_level_length = s->esc3_level_length; \
2674 } \
2675  \
2676 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2677  const SRC_TYPE *const s, \
2678  int data_partitioning) \
2679 { \
2680  /* FIXME is memcpy faster than a loop? */ \
2681  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2682  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2683  \
2684  /* MPEG-1 */ \
2685  d->mb_skip_run = s->mb_skip_run; \
2686  for (int i = 0; i < 3; i++) \
2687  d->last_dc[i] = s->last_dc[i]; \
2688  \
2689  /* statistics */ \
2690  d->mv_bits = s->mv_bits; \
2691  d->i_tex_bits = s->i_tex_bits; \
2692  d->p_tex_bits = s->p_tex_bits; \
2693  d->i_count = s->i_count; \
2694  d->misc_bits = s->misc_bits; \
2695  \
2696  d->c.mb_intra = s->c.mb_intra; \
2697  d->c.mb_skipped = s->c.mb_skipped; \
2698  d->c.mv_type = s->c.mv_type; \
2699  d->c.mv_dir = s->c.mv_dir; \
2700  d->pb = s->pb; \
2701  if (data_partitioning) { \
2702  d->pb2 = s->pb2; \
2703  d->tex_pb = s->tex_pb; \
2704  } \
2705  d->block = s->block; \
2706  for (int i = 0; i < 8; i++) \
2707  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2708  d->c.interlaced_dct = s->c.interlaced_dct; \
2709  d->c.qscale = s->c.qscale; \
2710  \
2711  d->esc3_level_length = s->esc3_level_length; \
2712 }
2713 
 /* backup/save: MPVEncContext -> MBBackup; reset/store: MBBackup -> MPVEncContext */
2714 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2715 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2716 
 /* Trial-encode the current macroblock with the given motion vector into
  * one of two scratch bitstream writers; if the resulting score (bit count,
  * additionally lambda-weighted and SSE-augmented in full RD mode) beats
  * *dmin, keep it: record the new minimum, flip *next_block so the winner's
  * buffers are preserved, and save the encoder state into *best. */
2717 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2718  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2719  int *dmin, int *next_block, int motion_x, int motion_y)
2720 {
2721  int score;
2722  uint8_t *dest_backup[3];
2723 
 /* Restore the pre-trial macroblock state saved by the caller. */
2724  reset_context_before_encode(s, backup);
2725 
2726  s->block = s->blocks[*next_block];
2727  s->pb = pb[*next_block];
2728  if (s->data_partitioning) {
2729  s->pb2 = pb2 [*next_block];
2730  s->tex_pb= tex_pb[*next_block];
2731  }
2732 
 /* The second candidate reconstructs into the scratchpad so the current
  * best reconstruction in s->c.dest is not clobbered. */
2733  if(*next_block){
2734  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2735  s->c.dest[0] = s->c.sc.rd_scratchpad;
2736  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2737  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2738  av_assert0(s->c.linesize >= 32); //FIXME
2739  }
2740 
2741  encode_mb(s, motion_x, motion_y);
2742 
 /* Base score: bits spent by this candidate. */
2743  score= put_bits_count(&s->pb);
2744  if (s->data_partitioning) {
2745  score+= put_bits_count(&s->pb2);
2746  score+= put_bits_count(&s->tex_pb);
2747  }
2748 
 /* Full rate-distortion decision: score = bits*lambda2 + SSE. */
2749  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2750  mpv_reconstruct_mb(s, s->block);
2751 
2752  score *= s->lambda2;
2753  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2754  }
2755 
2756  if(*next_block){
2757  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2758  }
2759 
2760  if(score<*dmin){
2761  *dmin= score;
2762  *next_block^=1;
2763 
2764  save_context_after_encode(best, s, s->data_partitioning);
2765  }
2766 }
2767 
2768 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2769 {
2770  const uint32_t *sq = ff_square_tab + 256;
2771  int acc=0;
2772  int x,y;
2773 
2774  if(w==16 && h==16)
2775  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2776  else if(w==8 && h==8)
2777  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2778 
2779  for(y=0; y<h; y++){
2780  for(x=0; x<w; x++){
2781  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2782  }
2783  }
2784 
2785  av_assert2(acc>=0);
2786 
2787  return acc;
2788 }
2789 
2790 static int sse_mb(MPVEncContext *const s)
2791 {
2792  int w= 16;
2793  int h= 16;
2794  int chroma_mb_w = w >> s->c.chroma_x_shift;
2795  int chroma_mb_h = h >> s->c.chroma_y_shift;
2796 
2797  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2798  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2799 
2800  if(w==16 && h==16)
2801  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2802  s->c.dest[0], s->c.linesize, 16) +
2803  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2804  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2805  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2806  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2807  else
2808  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2809  s->c.dest[0], w, h, s->c.linesize) +
2810  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2811  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2812  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2813  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2814 }
2815 
 /* Slice worker: motion-estimation pre-pass.  Scans the slice's macroblock
  * rows bottom-up and each row right-to-left, using the pre_dia_size
  * diamond.
  * NOTE(review): the function's signature line is elided in this
  * rendering; it receives the per-slice MPVEncContext via arg. */
2817  MPVEncContext *const s = *(void**)arg;
2818 
2819 
2820  s->me.pre_pass = 1;
2821  s->me.dia_size = s->c.avctx->pre_dia_size;
2822  s->c.first_slice_line = 1;
2823  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2824  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2825  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2826  s->c.first_slice_line = 0;
2827  }
2828 
2829  s->me.pre_pass = 0;
2830 
2831  return 0;
2832 }
2833 
 /* Slice worker: full motion estimation for the slice's macroblock rows,
  * using ff_estimate_b_frame_motion() for B-pictures and
  * ff_estimate_p_frame_motion() otherwise.
  * NOTE(review): the function's signature line is elided in this
  * rendering; it receives the per-slice MPVEncContext via arg. */
2835  MPVEncContext *const s = *(void**)arg;
2836 
2837  s->me.dia_size = s->c.avctx->dia_size;
2838  s->c.first_slice_line = 1;
2839  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2840  s->c.mb_x = 0; //for block init below
2841  ff_init_block_index(&s->c);
2842  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2843  s->c.block_index[0] += 2;
2844  s->c.block_index[1] += 2;
2845  s->c.block_index[2] += 2;
2846  s->c.block_index[3] += 2;
2847 
2848  /* compute motion vector & mb_type and store in context */
2849  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2850  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2851  else
2852  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2853  }
2854  s->c.first_slice_line = 0;
2855  }
2856  return 0;
2857 }
2858 
2859 static int mb_var_thread(AVCodecContext *c, void *arg){
2860  MPVEncContext *const s = *(void**)arg;
2861 
2862  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2863  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2864  int xx = mb_x * 16;
2865  int yy = mb_y * 16;
2866  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2867  int varc;
2868  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2869 
2870  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2871  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2872 
2873  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2874  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2875  s->me.mb_var_sum_temp += varc;
2876  }
2877  }
2878  return 0;
2879 }
2880 
2881 static void write_slice_end(MPVEncContext *const s)
2882 {
 /* Terminate the current slice in a codec-specific way (MPEG-4 stuffing,
  * MJPEG / SpeedHQ handling), byte-align the writer, and account the
  * flushed bits as misc_bits for two-pass statistics.
  * NOTE(review): the codec-specific calls inside the branches are elided
  * in this rendering of the file. */
2883  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2884  if (s->partitioned_frame)
2886 
2887  ff_mpeg4_stuffing(&s->pb);
2888  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2889  s->c.out_format == FMT_MJPEG) {
2891  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2893  }
2894 
2895  flush_put_bits(&s->pb);
2896 
2897  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2898  s->misc_bits+= get_bits_diff(s);
2899 }
2900 
2901 static void write_mb_info(MPVEncContext *const s)
2902 {
2903  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2904  int offset = put_bits_count(&s->pb);
2905  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2906  int gobn = s->c.mb_y / s->gob_index;
2907  int pred_x, pred_y;
2908  if (CONFIG_H263_ENCODER)
2909  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2910  bytestream_put_le32(&ptr, offset);
2911  bytestream_put_byte(&ptr, s->c.qscale);
2912  bytestream_put_byte(&ptr, gobn);
2913  bytestream_put_le16(&ptr, mba);
2914  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2915  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2916  /* 4MV not implemented */
2917  bytestream_put_byte(&ptr, 0); /* hmv2 */
2918  bytestream_put_byte(&ptr, 0); /* vmv2 */
2919 }
2920 
2921 static void update_mb_info(MPVEncContext *const s, int startcode)
2922 {
2923  if (!s->mb_info)
2924  return;
2925  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2926  s->mb_info_size += 12;
2927  s->prev_mb_info = s->last_mb_info;
2928  }
2929  if (startcode) {
2930  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2931  /* This might have incremented mb_info_size above, and we return without
2932  * actually writing any info into that slot yet. But in that case,
2933  * this will be called again at the start of the after writing the
2934  * start code, actually writing the mb info. */
2935  return;
2936  }
2937 
2938  s->last_mb_info = put_bytes_count(&s->pb, 0);
2939  if (!s->mb_info_size)
2940  s->mb_info_size += 12;
2941  write_mb_info(s);
2942 }
2943 
2944 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2945 {
2946  if (put_bytes_left(&s->pb, 0) < threshold
2947  && s->c.slice_context_count == 1
2948  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2949  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2950 
2951  uint8_t *new_buffer = NULL;
2952  int new_buffer_size = 0;
2953 
2954  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2955  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2956  return AVERROR(ENOMEM);
2957  }
2958 
2959  emms_c();
2960 
2961  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2962  s->c.avctx->internal->byte_buffer_size + size_increase);
2963  if (!new_buffer)
2964  return AVERROR(ENOMEM);
2965 
2966  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2967  av_free(s->c.avctx->internal->byte_buffer);
2968  s->c.avctx->internal->byte_buffer = new_buffer;
2969  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2970  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2971  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2972  }
2973  if (put_bytes_left(&s->pb, 0) < threshold)
2974  return AVERROR(EINVAL);
2975  return 0;
2976 }
2977 
2978 static int encode_thread(AVCodecContext *c, void *arg){
2979  MPVEncContext *const s = *(void**)arg;
2980  int chr_h = 16 >> s->c.chroma_y_shift;
2981  int i;
2982  MBBackup best_s = { 0 }, backup_s;
2983  uint8_t bit_buf[2][MAX_MB_BYTES];
2984  // + 2 because ff_copy_bits() overreads
2985  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2986  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2987  PutBitContext pb[2], pb2[2], tex_pb[2];
2988 
2989  for(i=0; i<2; i++){
2990  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2991  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
2992  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
2993  }
2994 
2995  s->last_bits= put_bits_count(&s->pb);
2996  s->mv_bits=0;
2997  s->misc_bits=0;
2998  s->i_tex_bits=0;
2999  s->p_tex_bits=0;
3000  s->i_count=0;
3001 
3002  for(i=0; i<3; i++){
3003  /* init last dc values */
3004  /* note: quant matrix value (8) is implied here */
3005  s->last_dc[i] = 128 << s->c.intra_dc_precision;
3006 
3007  s->encoding_error[i] = 0;
3008  }
3009  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3010  s->last_dc[0] = 128 * 8 / 13;
3011  s->last_dc[1] = 128 * 8 / 14;
3012  s->last_dc[2] = 128 * 8 / 14;
3013 #if CONFIG_MPEG4_ENCODER
3014  } else if (s->partitioned_frame) {
3015  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3017 #endif
3018  }
3019  s->mb_skip_run = 0;
3020  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3021 
3022  s->last_mv_dir = 0;
3023 
3024  s->c.resync_mb_x = 0;
3025  s->c.resync_mb_y = 0;
3026  s->c.first_slice_line = 1;
3027  s->ptr_lastgob = s->pb.buf;
3028  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3029  int mb_y;
3030  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3031  int first_in_slice;
3032  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3033  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3035  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->c.intra_dc_precision;
3036  } else {
3037  mb_y = mb_y_order;
3038  }
3039  s->c.mb_x = 0;
3040  s->c.mb_y = mb_y;
3041 
3042  ff_set_qscale(&s->c, s->c.qscale);
3043  ff_init_block_index(&s->c);
3044 
3045  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3046  int mb_type, xy;
3047 // int d;
3048  int dmin= INT_MAX;
3049  int dir;
3050  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3051  + s->c.mb_width*MAX_MB_BYTES;
3052 
3054  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3055  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3056  return -1;
3057  }
3058  if (s->data_partitioning) {
3059  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3060  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3061  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3062  return -1;
3063  }
3064  }
3065 
3066  s->c.mb_x = mb_x;
3067  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3068  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3069 
3070  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3072  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3073  mb_type = s->mb_type[xy];
3074 
3075  /* write gob / video packet header */
3076  if(s->rtp_mode){
3077  int current_packet_size, is_gob_start;
3078 
3079  current_packet_size = put_bytes_count(&s->pb, 1)
3080  - (s->ptr_lastgob - s->pb.buf);
3081 
3082  is_gob_start = s->rtp_payload_size &&
3083  current_packet_size >= s->rtp_payload_size &&
3084  mb_y + mb_x > 0;
3085 
3086  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3087 
3088  switch (s->c.codec_id) {
3089  case AV_CODEC_ID_H263:
3090  case AV_CODEC_ID_H263P:
3091  if (!s->h263_slice_structured)
3092  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3093  break;
3095  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3097  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3098  s->mb_skip_run)
3099  is_gob_start=0;
3100  break;
3101  case AV_CODEC_ID_MJPEG:
3102  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3103  break;
3104  }
3105 
3106  if(is_gob_start){
3107  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3108  write_slice_end(s);
3109 
3110  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3112  }
3113 
3114  av_assert2((put_bits_count(&s->pb)&7) == 0);
3115  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3116 
3117  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3118  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3119  int d = 100 / s->error_rate;
3120  if(r % d == 0){
3121  current_packet_size=0;
3122  s->pb.buf_ptr= s->ptr_lastgob;
3123  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3124  }
3125  }
3126 
3127  switch (s->c.codec_id) {
3128  case AV_CODEC_ID_MPEG4:
3129  if (CONFIG_MPEG4_ENCODER) {
3133  }
3134  break;
3137  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3140  }
3141  break;
3142 #if CONFIG_H263P_ENCODER
3143  case AV_CODEC_ID_H263P:
3144  if (s->c.dc_val)
3146  // fallthrough
3147 #endif
3148  case AV_CODEC_ID_H263:
3149  if (CONFIG_H263_ENCODER) {
3150  update_mb_info(s, 1);
3152  }
3153  break;
3154  }
3155 
3156  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3157  int bits= put_bits_count(&s->pb);
3158  s->misc_bits+= bits - s->last_bits;
3159  s->last_bits= bits;
3160  }
3161 
3162  s->ptr_lastgob += current_packet_size;
3163  s->c.first_slice_line = 1;
3164  s->c.resync_mb_x = mb_x;
3165  s->c.resync_mb_y = mb_y;
3166  }
3167  }
3168 
3169  if (s->c.resync_mb_x == s->c.mb_x &&
3170  s->c.resync_mb_y+1 == s->c.mb_y)
3171  s->c.first_slice_line = 0;
3172 
3173  s->c.mb_skipped = 0;
3174  s->dquant=0; //only for QP_RD
3175 
3176  update_mb_info(s, 0);
3177 
3178  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3179  int next_block=0;
3180  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3181 
3182  backup_context_before_encode(&backup_s, s);
3183  backup_s.pb= s->pb;
3184  if (s->data_partitioning) {
3185  backup_s.pb2= s->pb2;
3186  backup_s.tex_pb= s->tex_pb;
3187  }
3188 
3189  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3190  s->c.mv_dir = MV_DIR_FORWARD;
3191  s->c.mv_type = MV_TYPE_16X16;
3192  s->c.mb_intra = 0;
3193  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3194  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3195  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3196  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3197  }
3198  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3199  s->c.mv_dir = MV_DIR_FORWARD;
3200  s->c.mv_type = MV_TYPE_FIELD;
3201  s->c.mb_intra = 0;
3202  for(i=0; i<2; i++){
3203  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3204  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3205  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3206  }
3207  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3208  &dmin, &next_block, 0, 0);
3209  }
3210  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3211  s->c.mv_dir = MV_DIR_FORWARD;
3212  s->c.mv_type = MV_TYPE_16X16;
3213  s->c.mb_intra = 0;
3214  s->c.mv[0][0][0] = 0;
3215  s->c.mv[0][0][1] = 0;
3216  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3217  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3218  }
3219  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3220  s->c.mv_dir = MV_DIR_FORWARD;
3221  s->c.mv_type = MV_TYPE_8X8;
3222  s->c.mb_intra = 0;
3223  for(i=0; i<4; i++){
3224  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3225  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3226  }
3227  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3228  &dmin, &next_block, 0, 0);
3229  }
3230  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3231  s->c.mv_dir = MV_DIR_FORWARD;
3232  s->c.mv_type = MV_TYPE_16X16;
3233  s->c.mb_intra = 0;
3234  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3235  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3236  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3237  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3238  }
3239  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3240  s->c.mv_dir = MV_DIR_BACKWARD;
3241  s->c.mv_type = MV_TYPE_16X16;
3242  s->c.mb_intra = 0;
3243  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3244  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3245  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3246  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3247  }
3248  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3249  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3250  s->c.mv_type = MV_TYPE_16X16;
3251  s->c.mb_intra = 0;
3252  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3253  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3254  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3255  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3256  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3257  &dmin, &next_block, 0, 0);
3258  }
3259  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3260  s->c.mv_dir = MV_DIR_FORWARD;
3261  s->c.mv_type = MV_TYPE_FIELD;
3262  s->c.mb_intra = 0;
3263  for(i=0; i<2; i++){
3264  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3265  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3266  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3267  }
3268  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3269  &dmin, &next_block, 0, 0);
3270  }
3271  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3272  s->c.mv_dir = MV_DIR_BACKWARD;
3273  s->c.mv_type = MV_TYPE_FIELD;
3274  s->c.mb_intra = 0;
3275  for(i=0; i<2; i++){
3276  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3277  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3278  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3279  }
3280  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3281  &dmin, &next_block, 0, 0);
3282  }
3283  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3284  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3285  s->c.mv_type = MV_TYPE_FIELD;
3286  s->c.mb_intra = 0;
3287  for(dir=0; dir<2; dir++){
3288  for(i=0; i<2; i++){
3289  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3290  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3291  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3292  }
3293  }
3294  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3295  &dmin, &next_block, 0, 0);
3296  }
3297  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3298  s->c.mv_dir = 0;
3299  s->c.mv_type = MV_TYPE_16X16;
3300  s->c.mb_intra = 1;
3301  s->c.mv[0][0][0] = 0;
3302  s->c.mv[0][0][1] = 0;
3303  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3304  &dmin, &next_block, 0, 0);
3305  s->c.mbintra_table[xy] = 1;
3306  }
3307 
3308  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3309  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3310  const int last_qp = backup_s.c.qscale;
3311  int qpi, qp, dc[6];
3312  int16_t ac[6][16];
3313  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3314  static const int dquant_tab[4]={-1,1,-2,2};
3315  int storecoefs = s->c.mb_intra && s->c.dc_val;
3316 
3317  av_assert2(backup_s.dquant == 0);
3318 
3319  //FIXME intra
3320  s->c.mv_dir = best_s.c.mv_dir;
3321  s->c.mv_type = MV_TYPE_16X16;
3322  s->c.mb_intra = best_s.c.mb_intra;
3323  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3324  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3325  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3326  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3327 
3328  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3329  for(; qpi<4; qpi++){
3330  int dquant= dquant_tab[qpi];
3331  qp= last_qp + dquant;
3332  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3333  continue;
3334  backup_s.dquant= dquant;
3335  if(storecoefs){
3336  for(i=0; i<6; i++){
3337  dc[i] = s->c.dc_val[s->c.block_index[i]];
3338  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3339  }
3340  }
3341 
3342  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3343  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3344  if (best_s.c.qscale != qp) {
3345  if(storecoefs){
3346  for(i=0; i<6; i++){
3347  s->c.dc_val[s->c.block_index[i]] = dc[i];
3348  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3349  }
3350  }
3351  }
3352  }
3353  }
3354  }
3355  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3356  int mx= s->b_direct_mv_table[xy][0];
3357  int my= s->b_direct_mv_table[xy][1];
3358 
3359  backup_s.dquant = 0;
3360  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3361  s->c.mb_intra = 0;
3362  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3363  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3364  &dmin, &next_block, mx, my);
3365  }
3366  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3367  backup_s.dquant = 0;
3368  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3369  s->c.mb_intra = 0;
3370  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3371  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3372  &dmin, &next_block, 0, 0);
3373  }
3374  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3375  int coded=0;
3376  for(i=0; i<6; i++)
3377  coded |= s->c.block_last_index[i];
3378  if(coded){
3379  int mx,my;
3380  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3381  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3382  mx=my=0; //FIXME find the one we actually used
3383  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3384  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3385  mx = s->c.mv[1][0][0];
3386  my = s->c.mv[1][0][1];
3387  }else{
3388  mx = s->c.mv[0][0][0];
3389  my = s->c.mv[0][0][1];
3390  }
3391 
3392  s->c.mv_dir = best_s.c.mv_dir;
3393  s->c.mv_type = best_s.c.mv_type;
3394  s->c.mb_intra = 0;
3395 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3396  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3397  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3398  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3399  backup_s.dquant= 0;
3400  s->skipdct=1;
3401  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3402  &dmin, &next_block, mx, my);
3403  s->skipdct=0;
3404  }
3405  }
3406 
3407  store_context_after_encode(s, &best_s, s->data_partitioning);
3408 
3409  pb_bits_count= put_bits_count(&s->pb);
3410  flush_put_bits(&s->pb);
3411  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3412  s->pb= backup_s.pb;
3413 
3414  if (s->data_partitioning) {
3415  pb2_bits_count= put_bits_count(&s->pb2);
3416  flush_put_bits(&s->pb2);
3417  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3418  s->pb2= backup_s.pb2;
3419 
3420  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3421  flush_put_bits(&s->tex_pb);
3422  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3423  s->tex_pb= backup_s.tex_pb;
3424  }
3425  s->last_bits= put_bits_count(&s->pb);
3426 
3427  if (CONFIG_H263_ENCODER &&
3428  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3430 
3431  if(next_block==0){ //FIXME 16 vs linesize16
3432  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3433  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3434  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3435  }
3436 
3437  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3438  mpv_reconstruct_mb(s, s->block);
3439  } else {
3440  int motion_x = 0, motion_y = 0;
3441  s->c.mv_type = MV_TYPE_16X16;
3442  // only one MB-Type possible
3443 
3444  switch(mb_type){
3446  s->c.mv_dir = 0;
3447  s->c.mb_intra = 1;
3448  motion_x= s->c.mv[0][0][0] = 0;
3449  motion_y= s->c.mv[0][0][1] = 0;
3450  s->c.mbintra_table[xy] = 1;
3451  break;
3453  s->c.mv_dir = MV_DIR_FORWARD;
3454  s->c.mb_intra = 0;
3455  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3456  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3457  break;
3459  s->c.mv_dir = MV_DIR_FORWARD;
3460  s->c.mv_type = MV_TYPE_FIELD;
3461  s->c.mb_intra = 0;
3462  for(i=0; i<2; i++){
3463  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3464  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3465  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3466  }
3467  break;
3469  s->c.mv_dir = MV_DIR_FORWARD;
3470  s->c.mv_type = MV_TYPE_8X8;
3471  s->c.mb_intra = 0;
3472  for(i=0; i<4; i++){
3473  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3474  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3475  }
3476  break;
3478  if (CONFIG_MPEG4_ENCODER) {
3480  s->c.mb_intra = 0;
3481  motion_x=s->b_direct_mv_table[xy][0];
3482  motion_y=s->b_direct_mv_table[xy][1];
3483  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3484  }
3485  break;
3487  if (CONFIG_MPEG4_ENCODER) {
3489  s->c.mb_intra = 0;
3490  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3491  }
3492  break;
3494  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3495  s->c.mb_intra = 0;
3496  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3497  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3498  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3499  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3500  break;
3502  s->c.mv_dir = MV_DIR_BACKWARD;
3503  s->c.mb_intra = 0;
3504  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3505  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3506  break;
3508  s->c.mv_dir = MV_DIR_FORWARD;
3509  s->c.mb_intra = 0;
3510  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3511  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3512  break;
3514  s->c.mv_dir = MV_DIR_FORWARD;
3515  s->c.mv_type = MV_TYPE_FIELD;
3516  s->c.mb_intra = 0;
3517  for(i=0; i<2; i++){
3518  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3519  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3520  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3521  }
3522  break;
3524  s->c.mv_dir = MV_DIR_BACKWARD;
3525  s->c.mv_type = MV_TYPE_FIELD;
3526  s->c.mb_intra = 0;
3527  for(i=0; i<2; i++){
3528  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3529  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3530  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3531  }
3532  break;
3534  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3535  s->c.mv_type = MV_TYPE_FIELD;
3536  s->c.mb_intra = 0;
3537  for(dir=0; dir<2; dir++){
3538  for(i=0; i<2; i++){
3539  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3540  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3541  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3542  }
3543  }
3544  break;
3545  default:
3546  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3547  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3548  "the only candidate (always coupled with INTER) "
3549  "so that it never reaches this switch");
3550  }
3551 
3552  encode_mb(s, motion_x, motion_y);
3553 
3554  // RAL: Update last macroblock type
3555  s->last_mv_dir = s->c.mv_dir;
3556 
3557  if (CONFIG_H263_ENCODER &&
3558  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3560 
3561  mpv_reconstruct_mb(s, s->block);
3562  }
3563 
3564  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3565 
3566  /* clean the MV table in IPS frames for direct mode in B-frames */
3567  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3568  s->p_mv_table[xy][0]=0;
3569  s->p_mv_table[xy][1]=0;
3570 #if CONFIG_H263_ENCODER
3571  } else if (s->c.h263_pred || s->c.h263_aic) {
3573 #endif
3574  }
3575 
3576  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3577  int w= 16;
3578  int h= 16;
3579 
3580  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3581  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3582 
3583  s->encoding_error[0] += sse(
3584  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3585  s->c.dest[0], w, h, s->c.linesize);
3586  s->encoding_error[1] += sse(
3587  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3588  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3589  s->encoding_error[2] += sse(
3590  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3591  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3592  }
3593  if (s->loop_filter) {
3594  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3595  ff_h263_loop_filter(&s->c);
3596  }
3597  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3598  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3599  }
3600  }
3601 
3602 #if CONFIG_MSMPEG4ENC
3603  //not beautiful here but we must write it before flushing so it has to be here
3604  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3605  s->c.pict_type == AV_PICTURE_TYPE_I)
3607 #endif
3608 
3609  write_slice_end(s);
3610 
3611  return 0;
3612 }
3613 
/* Helpers for merging per-slice encoder statistics into the main context.
 * ADD() just accumulates a field; MERGE() accumulates it and zeroes the
 * source so the value cannot be counted twice on a later merge. */
3614 #define ADD(field) dst->field += src->field;
3615 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the function signature (original line 3616, presumably
 * "static void merge_context_after_me(MPVEncContext *dst, MPVEncContext *src)")
 * is missing from this extraction -- confirm against the complete file.
 * This body folds a slice context's motion-estimation statistics into
 * the main context. */
3617 {
3618  ADD(me.scene_change_score);
3619  ADD(me.mc_mb_var_sum_temp);
3620  ADD(me.mb_var_sum_temp);
3621 }
3622 
/* NOTE(review): the function signature (original line 3623, presumably
 * "static void merge_context_after_encode(MPVEncContext *dst, MPVEncContext *src)")
 * is missing from this extraction -- confirm against the complete file.
 * This body merges a slice context's bit counts, error statistics and
 * bitstream into the main context after the slice has been encoded. */
3624 {
3625  int i;
3626 
3627  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3628  MERGE(dct_count[1]);
3629  ADD(mv_bits);
3630  ADD(i_tex_bits);
3631  ADD(p_tex_bits);
3632  ADD(i_count);
3633  ADD(misc_bits);
3634  ADD(encoding_error[0]);
3635  ADD(encoding_error[1]);
3636  ADD(encoding_error[2]);
3637 
 /* Merge the per-coefficient noise-reduction error sums, if enabled. */
3638  if (dst->dct_error_sum) {
3639  for(i=0; i<64; i++){
3640  MERGE(dct_error_sum[0][i]);
3641  MERGE(dct_error_sum[1][i]);
3642  }
3643  }
3644 
 /* Both bitstreams must be byte aligned before the slice's bits are
  * appended to the main bitstream. */
3645  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3646  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3647  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3648  flush_put_bits(&dst->pb);
3649 }
3650 
/**
 * Select the quantizer (lambda/qscale) for the current picture.
 *
 * Priority: an explicitly scheduled lambda (m->next_lambda), then the
 * rate controller (unless fixed_qscale), otherwise the quality already
 * attached to the current picture. With adaptive quantization the
 * per-macroblock qscale table is (re)initialized as well.
 *
 * @param m       main encoder context
 * @param dry_run if nonzero, one-shot state (next_lambda) is not consumed
 * @return 0 on success, -1 if rate estimation yielded a negative quality
 */
3651 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3652 {
3653  MPVEncContext *const s = &m->s;
3654 
3655  if (m->next_lambda){
3656  s->c.cur_pic.ptr->f->quality = m->next_lambda;
3657  if(!dry_run) m->next_lambda= 0;
3658  } else if (!m->fixed_qscale) {
3659  int quality = ff_rate_estimate_qscale(m, dry_run);
3660  s->c.cur_pic.ptr->f->quality = quality;
3661  if (s->c.cur_pic.ptr->f->quality < 0)
3662  return -1;
3663  }
3664 
3665  if(s->adaptive_quant){
3666  init_qscale_tab(s);
3667 
 /* NOTE(review): the codec-specific qscale clean-up calls that normally
  * follow each "if (CONFIG_..._ENCODER)" below (original lines 3671 and
  * 3677) are missing from this extraction -- confirm against the
  * complete file. */
3668  switch (s->c.codec_id) {
3669  case AV_CODEC_ID_MPEG4:
3670  if (CONFIG_MPEG4_ENCODER)
3672  break;
3673  case AV_CODEC_ID_H263:
3674  case AV_CODEC_ID_H263P:
3675  case AV_CODEC_ID_FLV1:
3676  if (CONFIG_H263_ENCODER)
3678  break;
3679  }
3680 
 /* With adaptive quant, the first macroblock's lambda is used as base. */
3681  s->lambda = s->lambda_table[0];
3682  //FIXME broken
3683  }else
3684  s->lambda = s->c.cur_pic.ptr->f->quality;
3685  update_qscale(m);
3686  return 0;
3687 }
3688 
3689 /* must be called before writing the header */
/* NOTE(review): the function signature (original line 3690, presumably
 * "static void set_frame_distances(MPVEncContext *const s)") is missing
 * from this extraction -- confirm against the complete file.
 * This body derives the temporal distances (pp_time, pb_time) used by
 * B-frame prediction from the current picture's pts. */
3691 {
 /* A valid pts is required to derive frame distances. */
3692  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3693  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3694 
3695  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
 /* Distance from the surrounding non-B frames to this B-frame. */
3696  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3697  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3698  }else{
 /* Distance between the two most recent non-B frames. */
3699  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3700  s->c.last_non_b_time = s->c.time;
3701  av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
3702  }
3703 }
3704 
/**
 * Encode one picture: choose qscale/fcodes, run motion estimation across
 * all slice contexts, write the picture header, then encode all slices in
 * parallel and merge their bitstreams into the main context.
 *
 * @param m   main encoder context
 * @param pkt output packet whose data buffer is partitioned among slices
 * @return 0 on success, a negative error code on failure
 *
 * NOTE(review): several call lines are missing from this extraction
 * (originals 3715, 3717, 3815, 3845-3846, 3866) -- e.g. the frame-distance
 * setup calls after the two conditions below and the long-MV fix for the
 * interlaced B-field tables. Confirm against the complete file.
 */
3705 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3706 {
3707  MPVEncContext *const s = &m->s;
3708  int i, ret;
3709  int bits;
3710  int context_count = s->c.slice_context_count;
3711 
3712  /* we need to initialize some time vars before we can encode B-frames */
3713  // RAL: Condition added for MPEG1VIDEO
3714  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3716  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
3718 
3719 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3720 
3721  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3722  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3723  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3724  s->c.no_rounding ^= s->flipflop_rounding;
3725  }
3726 
3727  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3728  ret = estimate_qp(m, 1);
3729  if (ret < 0)
3730  return ret;
3731  ff_get_2pass_fcode(m);
3732  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3733  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3734  s->lambda = m->last_lambda_for[s->c.pict_type];
3735  else
3736  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3737  update_qscale(m);
3738  }
3739 
 /* Partition the output packet among the slice contexts proportionally
  * to their share of macroblock rows, and prime each slice for ME. */
3740  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
3741  for (int i = 0; i < context_count; i++) {
3742  MPVEncContext *const slice = s->c.enc_contexts[i];
3743  int h = s->c.mb_height;
3744  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3745  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3746 
3747  init_put_bits(&slice->pb, start, end - start);
3748 
3749  if (i) {
3750  ret = ff_update_duplicate_context(&slice->c, &s->c);
3751  if (ret < 0)
3752  return ret;
3753  slice->lambda = s->lambda;
3754  slice->lambda2 = s->lambda2;
3755  }
3756  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3757  ff_me_init_pic(slice);
3758  }
3759 
3760  /* Estimate motion for every MB */
3761  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
3762  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3763  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3764  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3765  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3766  m->me_pre == 2) {
3767  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3768  &s->c.enc_contexts[0], NULL,
3769  context_count, sizeof(void*));
3770  }
3771  }
3772 
3773  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3774  NULL, context_count, sizeof(void*));
3775  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3776  /* I-Frame */
3777  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3778  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3779 
3780  if (!m->fixed_qscale) {
3781  /* finding spatial complexity for I-frame rate control */
3782  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3783  NULL, context_count, sizeof(void*));
3784  }
3785  }
3786  for(i=1; i<context_count; i++){
3787  merge_context_after_me(s, s->c.enc_contexts[i]);
3788  }
3789  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3790  m->mb_var_sum = s->me. mb_var_sum_temp;
3791  emms_c();
3792 
 /* Scene-change detection: promote a P-frame to I if ME reports a
  * sufficiently large scene change score. */
3793  if (s->me.scene_change_score > m->scenechange_threshold &&
3794  s->c.pict_type == AV_PICTURE_TYPE_P) {
3795  s->c.pict_type = AV_PICTURE_TYPE_I;
3796  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3797  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3798  if (s->c.msmpeg4_version >= MSMP4_V3)
3799  s->c.no_rounding = 1;
3800  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3801  m->mb_var_sum, m->mc_mb_var_sum);
3802  }
3803 
 /* Choose f_code/b_code from the MV tables and clamp over-long MVs. */
3804  if (!s->umvplus) {
3805  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3806  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3807 
3808  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3809  int a,b;
3810  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3811  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3812  s->f_code = FFMAX3(s->f_code, a, b);
3813  }
3814 
3816  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3817  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3818  int j;
3819  for(i=0; i<2; i++){
3820  for(j=0; j<2; j++)
3821  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3822  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3823  }
3824  }
3825  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3826  int a, b;
3827 
3828  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3829  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3830  s->f_code = FFMAX(a, b);
3831 
3832  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3833  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3834  s->b_code = FFMAX(a, b);
3835 
3836  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3837  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3838  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3839  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3840  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3841  int dir, j;
3842  for(dir=0; dir<2; dir++){
3843  for(i=0; i<2; i++){
3844  for(j=0; j<2; j++){
3847  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3848  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3849  }
3850  }
3851  }
3852  }
3853  }
3854  }
3855 
3856  ret = estimate_qp(m, 0);
3857  if (ret < 0)
3858  return ret;
3859 
3860  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3861  s->c.pict_type == AV_PICTURE_TYPE_I &&
3862  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3863  s->c.qscale = 3; //reduce clipping problems
3864 
 /* MJPEG/AMV: bake the qscale into the quantization matrices and use a
  * fixed qscale of 8 afterwards. */
3865  if (s->c.out_format == FMT_MJPEG) {
3867  (7 + s->c.qscale) / s->c.qscale, 65535);
3868  if (ret < 0)
3869  return ret;
3870 
3871  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3872  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3873  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3874 
3875  if (s->c.avctx->intra_matrix) {
3876  chroma_matrix =
3877  luma_matrix = s->c.avctx->intra_matrix;
3878  }
3879  if (s->c.avctx->chroma_intra_matrix)
3880  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3881 
3882  /* for mjpeg, we do include qscale in the matrix */
3883  for (int i = 1; i < 64; i++) {
3884  int j = s->c.idsp.idct_permutation[i];
3885 
3886  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3887  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3888  }
3889  s->c.y_dc_scale_table =
3890  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
3891  s->c.chroma_intra_matrix[0] =
3892  s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
3893  } else {
3894  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3895  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3896  for (int i = 1; i < 64; i++) {
3897  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3898 
3899  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3900  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3901  }
3902  s->c.y_dc_scale_table = y;
3903  s->c.c_dc_scale_table = c;
3904  s->c.intra_matrix[0] = 13;
3905  s->c.chroma_intra_matrix[0] = 14;
3906  }
3907  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3908  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3909  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3910  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3911  s->c.qscale = 8;
3912  }
3913 
3914  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3915  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3916  } else {
3917  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3918  }
3919  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3920 
3921  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3922  m->picture_in_gop_number = 0;
3923 
 /* Write the picture header and account its bits separately. */
3924  s->c.mb_x = s->c.mb_y = 0;
3925  s->last_bits= put_bits_count(&s->pb);
3926  ret = m->encode_picture_header(m);
3927  if (ret < 0)
3928  return ret;
3929  bits= put_bits_count(&s->pb);
3930  m->header_bits = bits - s->last_bits;
3931 
 /* Encode all slices in parallel, then splice the slice bitstreams
  * back-to-back into the main bitstream. */
3932  for(i=1; i<context_count; i++){
3933  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3934  }
3935  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3936  NULL, context_count, sizeof(void*));
3937  for(i=1; i<context_count; i++){
3938  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3939  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3940  merge_context_after_encode(s, s->c.enc_contexts[i]);
3941  }
3942  emms_c();
3943  return 0;
3944 }
3945 
3946 static inline void denoise_dct(MPVEncContext *const s, int16_t block[])
3947 {
3948  if (!s->dct_error_sum)
3949  return;
3950 
3951  const int intra = s->c.mb_intra;
3952  s->dct_count[intra]++;
3953  s->mpvencdsp.denoise_dct(block, s->dct_error_sum[intra], s->dct_offset[intra]);
3954 }
3955 
                                  int16_t *block, int n,
                                  int qscale, int *overflow){
    /* Rate-distortion-optimal quantization of one 8x8 block: after the
     * forward DCT, a trellis search over run/level decisions picks, for
     * every scan position, the quantized level (exact or reduced by one,
     * or forced +-1) and the predecessor that minimize
     * distortion + lambda * bits.  Returns the index of the last nonzero
     * coefficient in scan order (or -1 / 0 depending on intra), like the
     * plain dct_quantize_c(). */
    const int *qmat;
    const uint16_t *matrix;
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];            // best predecessor run for each position
    int level_tab[65];          // best level for each position
    int score_tab[65];          // best accumulated RD cost ending at position
    int survivor[65];           // positions still worth extending from
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];           // up to two candidate levels per position
    int coeff_count[64];        // how many candidates (1 or 2) per position
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    const uint8_t *length, *last_length;
    const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
    int mpeg2_qscale;

    s->fdsp.fdct(block);

    denoise_dct(s, block);

    /* H.263-style dequant parameters (used for distortion estimation) */
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                   mpeg2_qscale = qscale << 1;

    if (s->c.mb_intra) {
        int q;
        scantable      = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;             // DC handled above, trellis covers AC only
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable      = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        matrix = s->c.inter_matrix;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    /* dead-zone thresholds; the (uint64_t) compare below tests
     * |level| > threshold1 with a single unsigned comparison */
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* scan backwards for the last coefficient that survives quantization */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

        if(((uint64_t)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build the candidate levels for every position up to last_non_zero */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((uint64_t)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* below threshold: only candidate is +-1 with sign of input */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* main trellis: for each position try each candidate level against
     * every surviving predecessor and keep the cheapest path */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value per output format so the
             * distortion estimate matches what the decoder will see */
            if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
                unquant_coeff= alevel*qmul + qadd;
            } else if (s->c.out_format == FMT_MJPEG) {
                j = s->c.idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{ // MPEG-1
                j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                if (s->c.mb_intra) {
                    unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the VLC tables (-64..63 before bias) */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                /* H.26x use a distinct "last" VLC table; track the best
                 * place to end the block separately */
                if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* out-of-table level: must use the escape code */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        /* prune predecessors that can no longer be on an optimal path;
         * the +lambda slack for long blocks compensates for MPEG-4 VLC
         * irregularities (see note) */
        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    /* for formats without a "last" VLC table, pick the best truncation
     * point from the accumulated scores instead */
    if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if (i)
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    /* special case: only position 0 coded in an inter block; re-decide
     * its level (or drop it entirely) against the DC distortion */
    if(last_non_zero == 0 && start_i == 0){
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
                unquant_coeff= (alevel*qmul + qadd)>>3;
            } else{ // MPEG-1
                unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                unquant_coeff =   (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }

    /* back-substitute: walk the chosen path from the end, writing the
     * surviving levels back into the (permuted) block */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
4267 
/* 8x8 DCT basis functions, scaled by (1 << BASIS_SHIFT) and stored in
 * idct_permutation order; lazily filled in by build_basis() on first use
 * (dct_quantize_refine() checks basis[0][0] == 0). */
static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4269 
4270 static void build_basis(uint8_t *perm){
4271  int i, j, x, y;
4272  emms_c();
4273  for(i=0; i<8; i++){
4274  for(j=0; j<8; j++){
4275  for(y=0; y<8; y++){
4276  for(x=0; x<8; x++){
4277  double s= 0.25*(1<<BASIS_SHIFT);
4278  int index= 8*i + j;
4279  int perm_index= perm[index];
4280  if(i==0) s*= sqrt(0.5);
4281  if(j==0) s*= sqrt(0.5);
4282  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4283  }
4284  }
4285  }
4286  }
4287 }
4288 
/* Noise-shaping refinement pass: starting from an already-quantized block,
 * greedily perturbs individual coefficients by +-1 whenever the change
 * lowers distortion (measured against the weighted spatial-domain residual
 * in rem[]) plus lambda times the bit-cost delta.  Iterates until no
 * single-coefficient change improves the score.  Returns the updated
 * last-nonzero index. */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];   // weighted reconstruction error, spatial domain
    LOCAL_ALIGNED_16(int16_t, d1, [64]);     // DCT of weighted residual (gradient analysis)
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* basis[] is filled lazily on first use */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable      = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable      = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* initialize rem[] with (rounded DC minus the original pixels) at
     * RECON_SHIFT precision; dequantized AC terms are added back below */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* compress the visual-activity weights into the 16..63 range and
     * accumulate their energy for the lambda normalization */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* add the current dequantized AC coefficients into rem[] and record
     * the run lengths between nonzero levels */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* main refinement loop: repeatedly find the single +-1 coefficient
     * change with the best RD score, apply it, stop when none helps */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual: its sign tells which direction
             * a new coefficient would have to move to reduce the error */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* intra: also consider nudging the DC coefficient */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                    continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* coefficient stays nonzero: bit cost changes only
                         * through the level part of the VLC */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: inserting a coefficient splits the
                         * surrounding run, so account for both halves */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: removing a coefficient merges the runs
                     * on either side */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=  last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change, update last_non_zero, rebuild the
             * run table and fold the dequant delta into rem[] */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4604 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t tmp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* gather the coded coefficients and clear their old positions */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];

        tmp[pos]   = block[pos];
        block[pos] = 0;
    }

    /* scatter them back through the permutation */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];

        block[permutation[pos]] = tmp[pos];
    }
}
4640 
4641 static int dct_quantize_c(MPVEncContext *const s,
4642  int16_t *block, int n,
4643  int qscale, int *overflow)
4644 {
4645  int i, last_non_zero, q, start_i;
4646  const int *qmat;
4647  const uint8_t *scantable;
4648  int bias;
4649  int max=0;
4650  unsigned int threshold1, threshold2;
4651 
4652  s->fdsp.fdct(block);
4653 
4654  denoise_dct(s, block);
4655 
4656  if (s->c.mb_intra) {
4657  scantable = s->c.intra_scantable.scantable;
4658  if (!s->c.h263_aic) {
4659  if (n < 4)
4660  q = s->c.y_dc_scale;
4661  else
4662  q = s->c.c_dc_scale;
4663  q = q << 3;
4664  } else
4665  /* For AIC we skip quant/dequant of INTRADC */
4666  q = 1 << 3;
4667 
4668  /* note: block[0] is assumed to be positive */
4669  block[0] = (block[0] + (q >> 1)) / q;
4670  start_i = 1;
4671  last_non_zero = 0;
4672  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4673  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4674  } else {
4675  scantable = s->c.inter_scantable.scantable;
4676  start_i = 0;
4677  last_non_zero = -1;
4678  qmat = s->q_inter_matrix[qscale];
4679  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4680  }
4681  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4682  threshold2= (threshold1<<1);
4683  for(i=63;i>=start_i;i--) {
4684  const int j = scantable[i];
4685  int64_t level = (int64_t)block[j] * qmat[j];
4686 
4687  if(((uint64_t)(level+threshold1))>threshold2){
4688  last_non_zero = i;
4689  break;
4690  }else{
4691  block[j]=0;
4692  }
4693  }
4694  for(i=start_i; i<=last_non_zero; i++) {
4695  const int j = scantable[i];
4696  int64_t level = (int64_t)block[j] * qmat[j];
4697 
4698 // if( bias+level >= (1<<QMAT_SHIFT)
4699 // || bias-level >= (1<<QMAT_SHIFT)){
4700  if(((uint64_t)(level+threshold1))>threshold2){
4701  if(level>0){
4702  level= (bias + level)>>QMAT_SHIFT;
4703  block[j]= level;
4704  }else{
4705  level= (bias - level)>>QMAT_SHIFT;
4706  block[j]= -level;
4707  }
4708  max |=level;
4709  }else{
4710  block[j]=0;
4711  }
4712  }
4713  *overflow= s->max_qcoeff < max; //overflow might have happened
4714 
4715  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4716  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4717  ff_block_permute(block, s->c.idsp.idct_permutation,
4718  scantable, last_non_zero);
4719 
4720  return last_non_zero;
4721 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1495
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3956
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1152
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1659
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:359
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:118
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:82
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:267
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1708
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:242
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:260
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:301
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2717
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:249
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2159
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:172
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:101
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2620
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1491
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
MPVEncContext::b_code
int b_code
backward MV resolution for B-frames
Definition: mpegvideoenc.h:81
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:524
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:823
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:241
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:99
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1891
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2642
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:273
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2642
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:141
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1285
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3690
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1913
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2637
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3651
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2633
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:298
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:233
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4268
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:956
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1516
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:261
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2834
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:816
out_size
static int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:170
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2768
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:294
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:139
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:245
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2642
AVPacket::data
uint8_t * data
Definition: packet.h:588
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:219
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2881
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:472
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:311
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2816
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2249
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2649
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:944
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:56
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:206
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1249
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:583
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:870
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:251
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:305
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:169
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2921
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2642
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:210
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1668
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:380
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1172
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2646
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:886
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1930
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:339
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2640
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:58
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:499
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:228
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:55
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2638
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:718
fail
#define fail()
Definition: checkasm.h:219
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:59
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:296
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:212
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1217
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:314
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1254
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2790
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:373
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:298
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:303
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:246
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1453
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:258
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:208
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:93
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:89
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4270
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:705
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:226
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1240
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:229
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2644
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:213
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:205
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3616
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:257
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1561
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:107
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:225
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:207
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1278
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4616
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1513
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:299
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2634
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:837
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2859
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
arg
const char * arg
Definition: jacosubdec.c:65
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:428
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1263
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:304
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:126
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:81
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:171
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:300
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:330
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1797
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:505
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3615
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:876
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1116
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2636
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:945
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2944
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:118
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1159
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:142
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1313
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2645
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:253
MPVMainEncContext
Definition: mpegvideoenc.h:202
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:173
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:823
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1313
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:543
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1388
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1320
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:80
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2215
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1013
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:240
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2634
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:335
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:308
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:293
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:95
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:313
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3705
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:372
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:209
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:175
MBBackup::c
struct MBBackup::@221 c
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:266
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:289
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:306
MBBackup
Definition: mpegvideo_enc.c:2630
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:295
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:411
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2641
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2632
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2978
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:274
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2646
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:120
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
MPVEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideoenc.h:166
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:979
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:278
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:259
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:235
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:263
AVCodecContext::height
int height
Definition: avcodec.h:600
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:491
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1281
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:896
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1855
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2273
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:441
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:237
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1365
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
MPVEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideoenc.h:75
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:108
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:223
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:844
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:248
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2643
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:499
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2635
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3614
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1242
AVRational::den
int den
Denominator.
Definition: rational.h:60
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2642
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:830
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:247
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:83
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:947
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
ff_mpeg1_clean_buffers
static void ff_mpeg1_clean_buffers(MPVEncContext *s)
Definition: mpeg12enc.h:39
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:290
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:777
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
denoise_dct
static void denoise_dct(MPVEncContext *const s, int16_t block[])
Definition: mpegvideo_enc.c:3946
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4289
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:553
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1292
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3623
w
uint8_t w
Definition: llvidencdsp.c:39
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:227
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:278
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:168
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1029
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:946
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:565
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:965
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:231
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:56
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2635
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:215
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:888
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4641
stride
#define stride
Definition: h264pred_template.c:536
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:150
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2646
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:312
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:195
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:624
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2901
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2642
pixblockdsp.h
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:944
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:119
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1603
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167