FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mpegvideo_unquantize.h"
63 #include "mjpegenc.h"
64 #include "speedhqenc.h"
65 #include "msmpeg4enc.h"
66 #include "pixblockdsp.h"
67 #include "qpeldsp.h"
68 #include "faandct.h"
69 #include "aandcttab.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "rv20enc.h"
76 #include "libavutil/refstruct.h"
77 #include <limits.h>
78 #include "sp5x.h"
79 
80 #define QUANT_BIAS_SHIFT 8
81 
82 #define QMAT_SHIFT_MMX 16
83 #define QMAT_SHIFT 21
84 
85 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
86 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
87 static int sse_mb(MPVEncContext *const s);
88 static int dct_quantize_c(MPVEncContext *const s,
89  int16_t *block, int n,
90  int qscale, int *overflow);
91 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
92 
93 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
94 
95 static const AVOption mpv_generic_options[] = {
98  { NULL },
99 };
100 
102  .class_name = "generic mpegvideo encoder",
103  .item_name = av_default_item_name,
104  .option = mpv_generic_options,
105  .version = LIBAVUTIL_VERSION_INT,
106 };
107 
/**
 * Build the per-qscale quantizer multiplier tables used by the encoder.
 *
 * For every qscale in [qmin, qmax], fills qmat[qscale] (32-bit reciprocal
 * multipliers, scaled by QMAT_SHIFT) and — on the generic path — the 16-bit
 * qmat16[qscale][0] multipliers plus the bias terms qmat16[qscale][1]
 * derived from 'bias', so division by the quantizer step becomes a
 * multiply+shift in the DCT quantizer. The table chosen depends on which
 * forward-DCT implementation is in use (ifast needs the AAN scale factors
 * folded in). 'intra' == 1 exempts element 0 (the DC coefficient) from the
 * final overflow check.
 */
108 void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
109  uint16_t (*qmat16)[2][64],
110  const uint16_t *quant_matrix,
111  int bias, int qmin, int qmax, int intra)
112 {
113  FDCTDSPContext *fdsp = &s->fdsp;
114  int qscale;
115  int shift = 0;
116 
117  for (qscale = qmin; qscale <= qmax; qscale++) {
118  int i;
119  int qscale2;
120 
 /* MPEG-2 style non-linear mapping from qscale code to the actual
  * quantizer step; otherwise the step is simply 2*qscale. */
121  if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
122  else qscale2 = qscale << 1;
123 
124  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
125 #if CONFIG_FAANDCT
126  fdsp->fdct == ff_faandct ||
127 #endif /* CONFIG_FAANDCT */
 /* NOTE(review): the line closing this condition (and opening the
  * branch body) is missing from this extract of the file. */
128  129  for (i = 0; i < 64; i++) {
130  const int j = s->c.idsp.idct_permutation[i];
131  int64_t den = (int64_t) qscale2 * quant_matrix[j];
132  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
133  * Assume x = qscale2 * quant_matrix[j]
134  * 1 <= x <= 28560
135  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
136  * 4194304 >= (1 << 22) / (x) >= 146 */
137 
 /* UINT64_C(2) << QMAT_SHIFT == 1 << (QMAT_SHIFT + 1) == 1 << 22 */
138  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
139  }
140  } else if (fdsp->fdct == ff_fdct_ifast) {
141  for (i = 0; i < 64; i++) {
142  const int j = s->c.idsp.idct_permutation[i];
 /* ifast leaves the AAN post-scale in the coefficients, so it
  * must be folded into the quantizer multipliers here. */
143  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
144  /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
145  * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
146  * 1247 <= x <= 900239760
147  * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
148  * 55107840 >= (1 << 36) / (x) >= 76 */
149 
150  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
151  }
152  } else {
153  for (i = 0; i < 64; i++) {
154  const int j = s->c.idsp.idct_permutation[i];
155  int64_t den = (int64_t) qscale2 * quant_matrix[j];
156  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
157  * Assume x = qscale2 * quant_matrix[j]
158  * 1 <= x <= 28560
159  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
160  * 4194304 >= (1 << 22) / (x) >= 146
161  *
162  * 1 <= x <= 28560
163  * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
164  * 131072 >= (1 << 17) / (x) >= 4 */
165 
166  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 /* 16-bit multiplier; clamp away 0 and the 128*256 sentinel so the
  * value always fits the SIMD quantizer's expectations. */
167  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
168 
169  if (qmat16[qscale][0][i] == 0 ||
170  qmat16[qscale][0][i] == 128 * 256)
171  qmat16[qscale][0][i] = 128 * 256 - 1;
 /* Bias rescaled from QUANT_BIAS_SHIFT to the 16-bit domain, then
  * divided by the multiplier so it can be added before scaling. */
172  qmat16[qscale][1][i] =
173  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
174  qmat16[qscale][0][i]);
175  }
176  }
177 
 /* Find how far the multipliers would have to be shifted down so that
  * the largest representable coefficient (8191, pre-scaled for ifast)
  * times qmat still fits in an int; 'intra' == 1 skips the DC term. */
178  for (i = intra; i < 64; i++) {
179  int64_t max = 8191;
180  if (fdsp->fdct == ff_fdct_ifast) {
181  max = (8191LL * ff_aanscales[i]) >> 14;
182  }
183  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
184  shift++;
185  }
186  }
187  }
 /* A non-zero shift means QMAT_SHIFT is too large for these matrices and
  * quantization may overflow; only warn, do not fail. */
188  if (shift) {
189  av_log(s->c.avctx, AV_LOG_INFO,
190  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
191  QMAT_SHIFT - shift);
192  }
193 }
194 
/**
 * Derive s->c.qscale (and the squared lambda s->lambda2) from the current
 * rate-distortion lambda s->lambda.
 *
 * The linear mapping used is qscale ~= (lambda * 139) >> (FF_LAMBDA_SHIFT+7),
 * clipped to [qmin, qmax] — with qmax widened to 31 when the rate controller
 * has set vbv_ignore_qmax.
 */
195 static inline void update_qscale(MPVMainEncContext *const m)
196 {
197  MPVEncContext *const s = &m->s;
198 
 /* Dead branch: the "&& 0" disables the non-linear-qscale search below,
  * so the else path is always taken. Kept, presumably, for reference. */
199  if (s->c.q_scale_type == 1 && 0) {
200  int i;
201  int bestdiff=INT_MAX;
202  int best = 1;
203 
 /* Pick the non-linear qscale index whose effective step is closest
  * to lambda*139, honoring qmin always and qmax unless ignored. */
204  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
205  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
206  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
207  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
208  continue;
209  if (diff < bestdiff) {
210  bestdiff = diff;
211  best = i;
212  }
213  }
214  s->c.qscale = best;
215  } else {
216  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
217  (FF_LAMBDA_SHIFT + 7);
218  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
219  }
220 
 /* Rounded lambda^2, renormalized back into the lambda fixed-point domain.
  * NOTE(review): the continuation line giving the shift amount (doxygen
  * line 222) is missing from this extract — confirm against upstream. */
221  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
223 }
224 
226 {
227  int i;
228 
229  if (matrix) {
230  put_bits(pb, 1, 1);
231  for (i = 0; i < 64; i++) {
233  }
234  } else
235  put_bits(pb, 1, 0);
236 }
237 
238 /**
239  * init s->c.cur_pic.qscale_table from s->lambda_table
240  */
241 static void init_qscale_tab(MPVEncContext *const s)
242 {
243  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
244 
245  for (int i = 0; i < s->c.mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
249  s->c.avctx->qmax);
250  }
251 }
252 
254  const MPVEncContext *const src)
255 {
256 #define COPY(a) dst->a = src->a
257  COPY(c.pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(c.qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(c.progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MPVEncContext to defaults for encoding.
277  */
279 {
280  MPVEncContext *const s = &m->s;
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
285  s->f_code = 1;
286  s->b_code = 1;
287 
288  if (!m->fcode_tab) {
290  ff_thread_once(&init_static_once, mpv_encode_init_static);
291  }
292  if (!s->c.y_dc_scale_table) {
293  s->c.y_dc_scale_table =
294  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
295  }
296 }
297 
299 {
300  s->dct_quantize = dct_quantize_c;
301 
302 #if ARCH_X86
304 #endif
305 
306  if (s->c.avctx->trellis)
307  s->dct_quantize = dct_quantize_trellis_c;
308 }
309 
310 static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
311 {
312  MpegEncContext *const s = &s2->c;
313  MPVUnquantDSPContext unquant_dsp_ctx;
314 
315  ff_mpv_unquantize_init(&unquant_dsp_ctx,
316  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
317 
318  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
319  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
320  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
321  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
322  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
323  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
324  } else {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
327  }
328 }
329 
331 {
332  MPVEncContext *const s = &m->s;
333  MECmpContext mecc;
334  me_cmp_func me_cmp[6];
335  int ret;
336 
337  ff_me_cmp_init(&mecc, avctx);
338  ret = ff_me_init(&s->me, avctx, &mecc, 1);
339  if (ret < 0)
340  return ret;
341  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
342  if (ret < 0)
343  return ret;
344  m->frame_skip_cmp_fn = me_cmp[1];
345  if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
346  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
347  if (ret < 0)
348  return ret;
349  if (!me_cmp[0] || !me_cmp[4])
350  return AVERROR(EINVAL);
351  s->ildct_cmp[0] = me_cmp[0];
352  s->ildct_cmp[1] = me_cmp[4];
353  }
354 
355  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
356 
357  s->sse_cmp[0] = mecc.sse[0];
358  s->sse_cmp[1] = mecc.sse[1];
359  s->sad_cmp[0] = mecc.sad[0];
360  s->sad_cmp[1] = mecc.sad[1];
361  if (avctx->mb_cmp == FF_CMP_NSSE) {
362  s->n_sse_cmp[0] = mecc.nsse[0];
363  s->n_sse_cmp[1] = mecc.nsse[1];
364  } else {
365  s->n_sse_cmp[0] = mecc.sse[0];
366  s->n_sse_cmp[1] = mecc.sse[1];
367  }
368 
369  return 0;
370 }
371 
372 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
374 {
375  MPVEncContext *const s = &m->s;
376  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
377  const uint16_t *intra_matrix, *inter_matrix;
378  int ret;
379 
380  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
381  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
382  return AVERROR(ENOMEM);
383 
384  if (s->c.out_format == FMT_MJPEG) {
385  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
386  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
387  // No need to set q_inter_matrix
389  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
390  return 0;
391  } else {
392  s->q_chroma_intra_matrix = s->q_intra_matrix;
393  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
394  }
395  if (!m->intra_only) {
396  s->q_inter_matrix = s->q_intra_matrix + 32;
397  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
398  }
399 
400  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
401  s->mpeg_quant) {
402  intra_matrix = ff_mpeg4_default_intra_matrix;
403  inter_matrix = ff_mpeg4_default_non_intra_matrix;
404  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
405  intra_matrix =
406  inter_matrix = ff_mpeg1_default_non_intra_matrix;
407  } else {
408  /* MPEG-1/2, SpeedHQ */
409  intra_matrix = ff_mpeg1_default_intra_matrix;
410  inter_matrix = ff_mpeg1_default_non_intra_matrix;
411  }
412  if (avctx->intra_matrix)
413  intra_matrix = avctx->intra_matrix;
414  if (avctx->inter_matrix)
415  inter_matrix = avctx->inter_matrix;
416 
417  /* init q matrix */
418  for (int i = 0; i < 64; i++) {
419  int j = s->c.idsp.idct_permutation[i];
420 
421  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
422  s->c.inter_matrix[j] = inter_matrix[i];
423  }
424 
425  /* precompute matrix */
427  if (ret < 0)
428  return ret;
429 
430  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
431  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
432  31, 1);
433  if (s->q_inter_matrix)
434  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
435  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
436  31, 0);
437 
438  return 0;
439 }
440 
442 {
443  MPVEncContext *const s = &m->s;
444  int has_b_frames = !!m->max_b_frames;
445  int16_t (*mv_table)[2];
446 
447  /* Allocate MB type table */
448  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
449  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
450  if (!s->mb_type)
451  return AVERROR(ENOMEM);
452  s->mc_mb_var = s->mb_type + mb_array_size;
453  s->mb_var = s->mc_mb_var + mb_array_size;
454  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
455 
456  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
457  return AVERROR(ENOMEM);
458 
459  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
460  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
461  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
462  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
463  nb_mv_tables += 8 * has_b_frames;
464  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
465  if (!s->p_field_select_table[0])
466  return AVERROR(ENOMEM);
467  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
468  }
469 
470  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
471  if (!mv_table)
472  return AVERROR(ENOMEM);
473  m->mv_table_base = mv_table;
474  mv_table += s->c.mb_stride + 1;
475 
476  s->p_mv_table = mv_table;
477  if (has_b_frames) {
478  s->b_forw_mv_table = mv_table += mv_table_size;
479  s->b_back_mv_table = mv_table += mv_table_size;
480  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
481  s->b_bidir_back_mv_table = mv_table += mv_table_size;
482  s->b_direct_mv_table = mv_table += mv_table_size;
483 
484  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
485  uint8_t *field_select = s->p_field_select_table[1];
486  for (int j = 0; j < 2; j++) {
487  for (int k = 0; k < 2; k++) {
488  for (int l = 0; l < 2; l++)
489  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
490  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
491  }
492  }
493  }
494  }
495 
496  return 0;
497 }
498 
500 {
501  MPVEncContext *const s = &m->s;
502  // Align the following per-thread buffers to avoid false sharing.
503  enum {
504 #ifndef _MSC_VER
505  /// The number is supposed to match/exceed the cache-line size.
506  ALIGN = FFMAX(128, _Alignof(max_align_t)),
507 #else
508  ALIGN = 128,
509 #endif
510  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
511  };
512  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
513  "Need checks for potential overflow.");
514  unsigned nb_slices = s->c.slice_context_count;
515  char *dct_error = NULL;
516 
517  if (m->noise_reduction) {
518  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
519  return AVERROR(ENOMEM);
520  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
521  if (!dct_error)
522  return AVERROR(ENOMEM);
524  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
525  }
526 
527  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
528  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
529  const int yc_size = y_size + 2 * c_size;
530  ptrdiff_t offset = 0;
531 
532  for (unsigned i = 0; i < nb_slices; ++i) {
533  MPVEncContext *const s2 = s->c.enc_contexts[i];
534 
535  s2->block = s2->blocks[0];
536 
537  if (dct_error) {
538  s2->dct_offset = s->dct_offset;
539  s2->dct_error_sum = (void*)dct_error;
540  dct_error += DCT_ERROR_SIZE;
541  }
542 
543  if (s2->c.ac_val) {
544  s2->c.dc_val += offset + i;
545  s2->c.ac_val += offset;
546  offset += yc_size;
547  }
548  }
549  return 0;
550 }
551 
552 /* init video encoder */
554 {
555  MPVMainEncContext *const m = avctx->priv_data;
556  MPVEncContext *const s = &m->s;
557  AVCPBProperties *cpb_props;
558  int gcd, ret;
559 
561 
562  switch (avctx->pix_fmt) {
563  case AV_PIX_FMT_YUVJ444P:
564  case AV_PIX_FMT_YUV444P:
565  s->c.chroma_format = CHROMA_444;
566  break;
567  case AV_PIX_FMT_YUVJ422P:
568  case AV_PIX_FMT_YUV422P:
569  s->c.chroma_format = CHROMA_422;
570  break;
571  default:
572  av_unreachable("Already checked via CODEC_PIXFMTS");
573  case AV_PIX_FMT_YUVJ420P:
574  case AV_PIX_FMT_YUV420P:
575  s->c.chroma_format = CHROMA_420;
576  break;
577  }
578 
579  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
580 
581  m->bit_rate = avctx->bit_rate;
582  s->c.width = avctx->width;
583  s->c.height = avctx->height;
584  if (avctx->gop_size > 600 &&
586  av_log(avctx, AV_LOG_WARNING,
587  "keyframe interval too large!, reducing it from %d to %d\n",
588  avctx->gop_size, 600);
589  avctx->gop_size = 600;
590  }
591  m->gop_size = avctx->gop_size;
592  s->c.avctx = avctx;
593  if (avctx->max_b_frames > MPVENC_MAX_B_FRAMES) {
594  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
595  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
597  } else if (avctx->max_b_frames < 0) {
598  av_log(avctx, AV_LOG_ERROR,
599  "max b frames must be 0 or positive for mpegvideo based encoders\n");
600  return AVERROR(EINVAL);
601  }
602  m->max_b_frames = avctx->max_b_frames;
603  s->c.codec_id = avctx->codec->id;
604  if (m->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
605  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
606  return AVERROR(EINVAL);
607  }
608 
609  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
610  s->rtp_mode = !!s->rtp_payload_size;
612 
613  if (m->gop_size <= 1) {
614  m->intra_only = 1;
615  m->gop_size = 12;
616  } else {
617  m->intra_only = 0;
618  }
619 
620  /* Fixed QSCALE */
621  m->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
622 
623  s->adaptive_quant = (avctx->lumi_masking ||
624  avctx->dark_masking ||
625  avctx->temporal_cplx_masking ||
626  avctx->spatial_cplx_masking ||
627  avctx->p_masking ||
628  m->border_masking ||
629  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
630  !m->fixed_qscale;
631 
632  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
633 
634  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
635  switch(avctx->codec_id) {
638  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
639  break;
640  case AV_CODEC_ID_MPEG4:
644  if (avctx->rc_max_rate >= 15000000) {
645  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
646  } else if(avctx->rc_max_rate >= 2000000) {
647  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
648  } else if(avctx->rc_max_rate >= 384000) {
649  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
650  } else
651  avctx->rc_buffer_size = 40;
652  avctx->rc_buffer_size *= 16384;
653  break;
654  }
655  if (avctx->rc_buffer_size) {
656  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
657  }
658  }
659 
660  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
661  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
662  return AVERROR(EINVAL);
663  }
664 
665  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
666  av_log(avctx, AV_LOG_INFO,
667  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
668  }
669 
670  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
671  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
672  return AVERROR(EINVAL);
673  }
674 
675  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
676  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
677  return AVERROR(EINVAL);
678  }
679 
680  if (avctx->rc_max_rate &&
681  avctx->rc_max_rate == avctx->bit_rate &&
682  avctx->rc_max_rate != avctx->rc_min_rate) {
683  av_log(avctx, AV_LOG_INFO,
684  "impossible bitrate constraints, this will fail\n");
685  }
686 
687  if (avctx->rc_buffer_size &&
688  avctx->bit_rate * (int64_t)avctx->time_base.num >
689  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
690  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
691  return AVERROR(EINVAL);
692  }
693 
694  if (!m->fixed_qscale &&
695  avctx->bit_rate * av_q2d(avctx->time_base) >
696  avctx->bit_rate_tolerance) {
697  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
698  av_log(avctx, AV_LOG_WARNING,
699  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
700  if (nbt <= INT_MAX) {
701  avctx->bit_rate_tolerance = nbt;
702  } else
703  avctx->bit_rate_tolerance = INT_MAX;
704  }
705 
706  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
707  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
708  s->c.codec_id != AV_CODEC_ID_FLV1) {
709  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
710  return AVERROR(EINVAL);
711  }
712 
713  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
714  av_log(avctx, AV_LOG_ERROR,
715  "OBMC is only supported with simple mb decision\n");
716  return AVERROR(EINVAL);
717  }
718 
719  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
720  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
721  return AVERROR(EINVAL);
722  }
723 
724  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
725  s->c.codec_id == AV_CODEC_ID_H263 ||
726  s->c.codec_id == AV_CODEC_ID_H263P) &&
727  (avctx->sample_aspect_ratio.num > 255 ||
728  avctx->sample_aspect_ratio.den > 255)) {
729  av_log(avctx, AV_LOG_WARNING,
730  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
733  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
734  }
735 
736  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
737  s->c.codec_id == AV_CODEC_ID_H263P) &&
738  (avctx->width > 2048 ||
739  avctx->height > 1152 )) {
740  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
741  return AVERROR(EINVAL);
742  }
743  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
744  (avctx->width > 65535 ||
745  avctx->height > 65535 )) {
746  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
747  return AVERROR(EINVAL);
748  }
749  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
750  s->c.codec_id == AV_CODEC_ID_H263P ||
751  s->c.codec_id == AV_CODEC_ID_RV20) &&
752  ((avctx->width &3) ||
753  (avctx->height&3) )) {
754  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
755  return AVERROR(EINVAL);
756  }
757 
758  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
759  s->c.codec_id == AV_CODEC_ID_WMV2) &&
760  avctx->width & 1) {
761  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
762  return AVERROR(EINVAL);
763  }
764 
766  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
767  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
768  return AVERROR(EINVAL);
769  }
770 
771  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
772  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
773  return AVERROR(EINVAL);
774  }
775 
776  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
777  avctx->mb_decision != FF_MB_DECISION_RD) {
778  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
779  return AVERROR(EINVAL);
780  }
781 
782  if (m->scenechange_threshold < 1000000000 &&
783  (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
784  av_log(avctx, AV_LOG_ERROR,
785  "closed gop with scene change detection are not supported yet, "
786  "set threshold to 1000000000\n");
787  return AVERROR_PATCHWELCOME;
788  }
789 
790  if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
791  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
793  av_log(avctx, AV_LOG_ERROR,
794  "low delay forcing is only available for mpeg2, "
795  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
796  return AVERROR(EINVAL);
797  }
798  if (m->max_b_frames != 0) {
799  av_log(avctx, AV_LOG_ERROR,
800  "B-frames cannot be used with low delay\n");
801  return AVERROR(EINVAL);
802  }
803  }
804 
805  if (avctx->slices > 1 &&
807  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
808  return AVERROR(EINVAL);
809  }
810 
811  if (m->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
812  av_log(avctx, AV_LOG_INFO,
813  "notice: b_frame_strategy only affects the first pass\n");
814  m->b_frame_strategy = 0;
815  }
816 
817  gcd = av_gcd(avctx->time_base.den, avctx->time_base.num);
818  if (gcd > 1) {
819  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
820  avctx->time_base.den /= gcd;
821  avctx->time_base.num /= gcd;
822  //return -1;
823  }
824 
825  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
826  // (a + x * 3 / 8) / x
827  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
828  s->inter_quant_bias = 0;
829  } else {
830  s->intra_quant_bias = 0;
831  // (a - x / 4) / x
832  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
833  }
834 
835  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
836  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
837  return AVERROR(EINVAL);
838  }
839 
840  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
841 
842  switch (avctx->codec->id) {
843 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
845  s->rtp_mode = 1;
846  /* fallthrough */
848  s->c.out_format = FMT_MPEG1;
849  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
850  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
851  break;
852 #endif
853 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
854  case AV_CODEC_ID_MJPEG:
855  case AV_CODEC_ID_AMV:
856  s->c.out_format = FMT_MJPEG;
857  m->intra_only = 1; /* force intra only for jpeg */
858  avctx->delay = 0;
859  s->c.low_delay = 1;
860  break;
861 #endif
862  case AV_CODEC_ID_SPEEDHQ:
863  s->c.out_format = FMT_SPEEDHQ;
864  m->intra_only = 1; /* force intra only for SHQ */
865  avctx->delay = 0;
866  s->c.low_delay = 1;
867  break;
868  case AV_CODEC_ID_H261:
869  s->c.out_format = FMT_H261;
870  avctx->delay = 0;
871  s->c.low_delay = 1;
872  s->rtp_mode = 0; /* Sliced encoding not supported */
873  break;
874  case AV_CODEC_ID_H263:
875  if (!CONFIG_H263_ENCODER)
878  s->c.width, s->c.height) == 8) {
879  av_log(avctx, AV_LOG_ERROR,
880  "The specified picture size of %dx%d is not valid for "
881  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
882  "352x288, 704x576, and 1408x1152. "
883  "Try H.263+.\n", s->c.width, s->c.height);
884  return AVERROR(EINVAL);
885  }
886  s->c.out_format = FMT_H263;
887  avctx->delay = 0;
888  s->c.low_delay = 1;
889  break;
890  case AV_CODEC_ID_H263P:
891  s->c.out_format = FMT_H263;
892  /* Fx */
893  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
894  s->modified_quant = s->c.h263_aic;
895  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
896  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
897  s->flipflop_rounding = 1;
898 
899  /* /Fx */
900  /* These are just to be sure */
901  avctx->delay = 0;
902  s->c.low_delay = 1;
903  break;
904  case AV_CODEC_ID_FLV1:
905  s->c.out_format = FMT_H263;
906  s->me.unrestricted_mv = 1;
907  s->rtp_mode = 0; /* don't allow GOB */
908  avctx->delay = 0;
909  s->c.low_delay = 1;
910  break;
911 #if CONFIG_RV10_ENCODER
912  case AV_CODEC_ID_RV10:
913  s->c.out_format = FMT_H263;
914  avctx->delay = 0;
915  s->c.low_delay = 1;
916  break;
917 #endif
918 #if CONFIG_RV20_ENCODER
919  case AV_CODEC_ID_RV20:
921  s->c.out_format = FMT_H263;
922  avctx->delay = 0;
923  s->c.low_delay = 1;
924  s->modified_quant = 1;
925  // Set here to force allocation of dc_val;
926  // will be set later on a per-frame basis.
927  s->c.h263_aic = 1;
928  s->loop_filter = 1;
929  s->me.unrestricted_mv = 0;
930  break;
931 #endif
932  case AV_CODEC_ID_MPEG4:
933  s->c.out_format = FMT_H263;
934  s->c.h263_pred = 1;
935  s->me.unrestricted_mv = 1;
936  s->flipflop_rounding = 1;
937  s->c.low_delay = m->max_b_frames ? 0 : 1;
938  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
939  break;
941  s->c.out_format = FMT_H263;
942  s->c.h263_pred = 1;
943  s->me.unrestricted_mv = 1;
944  s->c.msmpeg4_version = MSMP4_V2;
945  avctx->delay = 0;
946  s->c.low_delay = 1;
947  break;
949  s->c.out_format = FMT_H263;
950  s->c.h263_pred = 1;
951  s->me.unrestricted_mv = 1;
952  s->c.msmpeg4_version = MSMP4_V3;
953  s->flipflop_rounding = 1;
954  avctx->delay = 0;
955  s->c.low_delay = 1;
956  break;
957  case AV_CODEC_ID_WMV1:
958  s->c.out_format = FMT_H263;
959  s->c.h263_pred = 1;
960  s->me.unrestricted_mv = 1;
961  s->c.msmpeg4_version = MSMP4_WMV1;
962  s->flipflop_rounding = 1;
963  avctx->delay = 0;
964  s->c.low_delay = 1;
965  break;
966  case AV_CODEC_ID_WMV2:
967  s->c.out_format = FMT_H263;
968  s->c.h263_pred = 1;
969  s->me.unrestricted_mv = 1;
970  s->c.msmpeg4_version = MSMP4_WMV2;
971  s->flipflop_rounding = 1;
972  avctx->delay = 0;
973  s->c.low_delay = 1;
974  break;
975  default:
976  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
977  }
978 
979  avctx->has_b_frames = !s->c.low_delay;
980 
981  s->c.encoding = 1;
982 
983  s->c.progressive_frame =
984  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
986  s->c.alternate_scan);
987 
988  if (avctx->flags & AV_CODEC_FLAG_PSNR || avctx->mb_decision == FF_MB_DECISION_RD ||
990  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
991  (1 << AV_PICTURE_TYPE_P) |
992  (1 << AV_PICTURE_TYPE_B);
993  } else if (!m->intra_only) {
994  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
995  (1 << AV_PICTURE_TYPE_P);
996  } else {
997  s->frame_reconstruction_bitfield = 0;
998  }
999 
1000  if (m->lmin > m->lmax) {
1001  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1002  m->lmin = m->lmax;
1003  }
1004 
1005  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1006  * main slice to the slice contexts, so we initialize various fields of it
1007  * before calling ff_mpv_init_duplicate_contexts(). */
1008  s->parent = m;
1009  ff_mpv_idct_init(&s->c);
1010  init_unquantize(s, avctx);
1011  ff_fdctdsp_init(&s->fdsp, avctx);
1012  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1013  ff_pixblockdsp_init(&s->pdsp, 8);
1014  ret = me_cmp_init(m, avctx);
1015  if (ret < 0)
1016  return ret;
1017 
1018  if (!(avctx->stats_out = av_mallocz(256)) ||
1019  !(s->new_pic = av_frame_alloc()) ||
1020  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1021  return AVERROR(ENOMEM);
1022 
1023  ret = init_matrices(m, avctx);
1024  if (ret < 0)
1025  return ret;
1026 
1028 
1029  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1031 #if CONFIG_MSMPEG4ENC
1032  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1034 #endif
1035  }
1036 
1037  s->c.slice_ctx_size = sizeof(*s);
1038  ret = ff_mpv_common_init(&s->c);
1039  if (ret < 0)
1040  return ret;
1041  ret = init_buffers(m);
1042  if (ret < 0)
1043  return ret;
1044  if (s->c.slice_context_count > 1) {
1045  s->rtp_mode = 1;
1046  if (avctx->codec_id == AV_CODEC_ID_H263P)
1047  s->h263_slice_structured = 1;
1048  }
1050  if (ret < 0)
1051  return ret;
1052 
1053  ret = init_slice_buffers(m);
1054  if (ret < 0)
1055  return ret;
1056 
1058  if (ret < 0)
1059  return ret;
1060 
1061  if (m->b_frame_strategy == 2) {
1062  for (int i = 0; i < m->max_b_frames + 2; i++) {
1063  m->tmp_frames[i] = av_frame_alloc();
1064  if (!m->tmp_frames[i])
1065  return AVERROR(ENOMEM);
1066 
1068  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1069  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1070 
1071  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1072  if (ret < 0)
1073  return ret;
1074  }
1075  }
1076 
1077  cpb_props = ff_encode_add_cpb_side_data(avctx);
1078  if (!cpb_props)
1079  return AVERROR(ENOMEM);
1080  cpb_props->max_bitrate = avctx->rc_max_rate;
1081  cpb_props->min_bitrate = avctx->rc_min_rate;
1082  cpb_props->avg_bitrate = avctx->bit_rate;
1083  cpb_props->buffer_size = avctx->rc_buffer_size;
1084 
1085  return 0;
1086 }
1087 
1089 {
1090  MPVMainEncContext *const m = avctx->priv_data;
1091  MPVEncContext *const s = &m->s;
1092 
1094 
1095  ff_mpv_common_end(&s->c);
1096  av_refstruct_pool_uninit(&s->c.picture_pool);
1097 
1098  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
1101  }
1102  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1103  av_frame_free(&m->tmp_frames[i]);
1104 
1105  av_frame_free(&s->new_pic);
1106 
1107  av_freep(&avctx->stats_out);
1108 
1109  av_freep(&m->mv_table_base);
1110  av_freep(&s->p_field_select_table[0]);
1112 
1113  av_freep(&s->mb_type);
1114  av_freep(&s->lambda_table);
1115 
1116  av_freep(&s->q_intra_matrix);
1117  av_freep(&s->q_intra_matrix16);
1118  av_freep(&s->dct_offset);
1119 
1120  return 0;
1121 }
1122 
1123 /* put block[] to dest[] */
/**
 * Dequantize one intra coefficient block in place and write (not add)
 * its IDCT result to the destination plane.
 *
 * @param block     the 64 quantized coefficients; overwritten by dequantization
 * @param i         block index, selects per-block quantizer state
 * @param dest      destination sample pointer
 * @param line_size destination stride in bytes
 * @param qscale    quantizer scale to undo
 */
static inline void put_dct(MPVEncContext *const s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->c.dct_unquantize_intra(&s->c, block, i, qscale);
    s->c.idsp.idct_put(dest, line_size, block);
}
1130 
1131 static inline void add_dequant_dct(MPVEncContext *const s,
1132  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1133 {
1134  if (s->c.block_last_index[i] >= 0) {
1135  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1136 
1137  s->c.idsp.idct_add(dest, line_size, block);
1138  }
1139 }
1140 
1141 /**
1142  * Performs dequantization and IDCT (if necessary)
1143  */
1144 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1145 {
1146  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1147  /* print DCT coefficients */
1148  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1149  for (int i = 0; i < 6; i++) {
1150  for (int j = 0; j < 64; j++) {
1151  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1152  block[i][s->c.idsp.idct_permutation[j]]);
1153  }
1154  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1155  }
1156  }
1157 
1158  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1159  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1160  int dct_linesize, dct_offset;
1161  const int linesize = s->c.cur_pic.linesize[0];
1162  const int uvlinesize = s->c.cur_pic.linesize[1];
1163  const int block_size = 8;
1164 
1165  dct_linesize = linesize << s->c.interlaced_dct;
1166  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1167 
1168  if (!s->c.mb_intra) {
1169  /* No MC, as that was already done otherwise */
1170  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1171  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1172  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1173  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1174 
1175  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1176  if (s->c.chroma_y_shift) {
1177  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1178  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1179  } else {
1180  dct_linesize >>= 1;
1181  dct_offset >>= 1;
1182  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1183  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1184  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1185  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1186  }
1187  }
1188  } else {
1189  /* dct only in intra block */
1190  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1191  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1192  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1193  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1194 
1195  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1196  if (s->c.chroma_y_shift) {
1197  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1198  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1199  } else {
1200  dct_offset >>= 1;
1201  dct_linesize >>= 1;
1202  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1203  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1204  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1205  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1206  }
1207  }
1208  }
1209  }
1210 }
1211 
/* Sum of absolute differences between a 16x16 block and a constant value. */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++, src += stride) {
        for (int x = 0; x < 16; x++) {
            int d = src[x] - ref;
            sum += d >= 0 ? d : -d;
        }
    }

    return sum;
}
1225 
1226 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1227  const uint8_t *ref, int stride)
1228 {
1229  int x, y, w, h;
1230  int acc = 0;
1231 
1232  w = s->c.width & ~15;
1233  h = s->c.height & ~15;
1234 
1235  for (y = 0; y < h; y += 16) {
1236  for (x = 0; x < w; x += 16) {
1237  int offset = x + y * stride;
1238  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1239  stride, 16);
1240  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1241  int sae = get_sae(src + offset, mean, stride);
1242 
1243  acc += sae + 500 < sad;
1244  }
1245  }
1246  return acc;
1247 }
1248 
1249 /**
1250  * Allocates new buffers for an AVFrame and copies the properties
1251  * from another AVFrame.
1252  */
1253 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1254 {
1255  AVCodecContext *avctx = s->c.avctx;
1256  int ret;
1257 
1258  f->width = avctx->width + 2 * EDGE_WIDTH;
1259  f->height = avctx->height + 2 * EDGE_WIDTH;
1260 
1261  ret = ff_encode_alloc_frame(avctx, f);
1262  if (ret < 0)
1263  return ret;
1264 
1265  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1266  if (ret < 0)
1267  return ret;
1268 
1269  for (int i = 0; f->data[i]; i++) {
1270  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1271  f->linesize[i] +
1272  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1273  f->data[i] += offset;
1274  }
1275  f->width = avctx->width;
1276  f->height = avctx->height;
1277 
1278  ret = av_frame_copy_props(f, props_frame);
1279  if (ret < 0)
1280  return ret;
1281 
1282  return 0;
1283 }
1284 
/**
 * Push a user-supplied frame (or a flush marker) into the encoder's
 * FIFO of pending input pictures (m->input_picture[]).
 *
 * The input is referenced directly when its layout matches the encoder's
 * ("direct"), otherwise it is copied into encoder-owned padded buffers.
 *
 * @param m        the main encoder context
 * @param pic_arg  the frame to queue, or NULL when flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
{
    MPVEncContext *const s = &m->s;
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    /* lookahead needed before the first packet: max_b_frames for B-frame
     * reordering, else one frame unless the codec is low-delay */
    int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->c.low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!m->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = m->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = m->user_specified_pts;

                /* timestamps must be strictly monotonically increasing */
                if (pts <= last) {
                    av_log(s->c.avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->c.low_delay && display_picture_number == 1)
                    m->dts_delta = pts - last;
            }
            m->user_specified_pts = pts;
        } else {
            /* no pts given: extrapolate from the previous one, or fall
             * back to the display order index */
            if (m->user_specified_pts != AV_NOPTS_VALUE) {
                m->user_specified_pts =
                pts = m->user_specified_pts + 1;
                av_log(s->c.avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* the input can only be used in place when strides match, the
         * size is macroblock-aligned and data/stride are aligned enough */
        if (pic_arg->linesize[0] != s->c.linesize ||
            pic_arg->linesize[1] != s->c.uvlinesize ||
            pic_arg->linesize[2] != s->c.uvlinesize)
            direct = 0;
        if ((s->c.width & 15) || (s->c.height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->c.linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->c.avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);

        pic = av_refstruct_pool_get(s->c.picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            /* shared: just reference the caller's buffers */
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* copy plane by plane into our own padded buffers */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
                int h_shift = i ? s->c.chroma_x_shift : 0;
                int v_shift = i ? s->c.chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
                int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 codes in 32-line units, so more
                 * bottom padding may need to be replicated */
                if (   s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->c.progressive_sequence
                    && FFALIGN(s->c.height, 32) - s->c.height > 16)
                    vpad = 32;

                if (!s->c.avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* replicate edge pixels into the alignment padding */
                if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!m->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure m->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (m->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i - flush_offset] = m->input_picture[i];
    for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
        m->input_picture[i] = NULL;

    m->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1424 
1425 static int skip_check(MPVMainEncContext *const m,
1426  const MPVPicture *p, const MPVPicture *ref)
1427 {
1428  MPVEncContext *const s = &m->s;
1429  int score = 0;
1430  int64_t score64 = 0;
1431 
1432  for (int plane = 0; plane < 3; plane++) {
1433  const int stride = p->f->linesize[plane];
1434  const int bw = plane ? 1 : 2;
1435  for (int y = 0; y < s->c.mb_height * bw; y++) {
1436  for (int x = 0; x < s->c.mb_width * bw; x++) {
1437  int off = p->shared ? 0 : 16;
1438  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1439  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1440  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1441 
1442  switch (FFABS(m->frame_skip_exp)) {
1443  case 0: score = FFMAX(score, v); break;
1444  case 1: score += FFABS(v); break;
1445  case 2: score64 += v * (int64_t)v; break;
1446  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1447  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1448  }
1449  }
1450  }
1451  }
1452  emms_c();
1453 
1454  if (score)
1455  score64 = score;
1456  if (m->frame_skip_exp < 0)
1457  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1458  -1.0/m->frame_skip_exp);
1459 
1460  if (score64 < m->frame_skip_threshold)
1461  return 1;
1462  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1463  return 1;
1464  return 0;
1465 }
1466 
1468 {
1469  int ret;
1470  int size = 0;
1471 
1473  if (ret < 0)
1474  return ret;
1475 
1476  do {
1478  if (ret >= 0) {
1479  size += pkt->size;
1481  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1482  return ret;
1483  } while (ret >= 0);
1484 
1485  return size;
1486 }
1487 
1489 {
1490  MPVEncContext *const s = &m->s;
1491  AVPacket *pkt;
1492  const int scale = m->brd_scale;
1493  int width = s->c.width >> scale;
1494  int height = s->c.height >> scale;
1495  int out_size, p_lambda, b_lambda, lambda2;
1496  int64_t best_rd = INT64_MAX;
1497  int best_b_count = -1;
1498  int ret = 0;
1499 
1500  av_assert0(scale >= 0 && scale <= 3);
1501 
1502  pkt = av_packet_alloc();
1503  if (!pkt)
1504  return AVERROR(ENOMEM);
1505 
1506  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1507  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1508  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1509  if (!b_lambda) // FIXME we should do this somewhere else
1510  b_lambda = p_lambda;
1511  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1513 
1514  for (int i = 0; i < m->max_b_frames + 2; i++) {
1515  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1516  s->c.next_pic.ptr;
1517 
1518  if (pre_input_ptr) {
1519  const uint8_t *data[4];
1520  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1521 
1522  if (!pre_input_ptr->shared && i) {
1523  data[0] += INPLACE_OFFSET;
1524  data[1] += INPLACE_OFFSET;
1525  data[2] += INPLACE_OFFSET;
1526  }
1527 
1528  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1529  m->tmp_frames[i]->linesize[0],
1530  data[0],
1531  pre_input_ptr->f->linesize[0],
1532  width, height);
1533  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1534  m->tmp_frames[i]->linesize[1],
1535  data[1],
1536  pre_input_ptr->f->linesize[1],
1537  width >> 1, height >> 1);
1538  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1539  m->tmp_frames[i]->linesize[2],
1540  data[2],
1541  pre_input_ptr->f->linesize[2],
1542  width >> 1, height >> 1);
1543  }
1544  }
1545 
1546  for (int j = 0; j < m->max_b_frames + 1; j++) {
1547  AVCodecContext *c;
1548  int64_t rd = 0;
1549 
1550  if (!m->input_picture[j])
1551  break;
1552 
1554  if (!c) {
1555  ret = AVERROR(ENOMEM);
1556  goto fail;
1557  }
1558 
1559  c->width = width;
1560  c->height = height;
1562  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1563  c->mb_decision = s->c.avctx->mb_decision;
1564  c->me_cmp = s->c.avctx->me_cmp;
1565  c->mb_cmp = s->c.avctx->mb_cmp;
1566  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1567  c->pix_fmt = AV_PIX_FMT_YUV420P;
1568  c->time_base = s->c.avctx->time_base;
1569  c->max_b_frames = m->max_b_frames;
1570 
1571  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1572  if (ret < 0)
1573  goto fail;
1574 
1575 
1577  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1578 
1579  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1580  if (out_size < 0) {
1581  ret = out_size;
1582  goto fail;
1583  }
1584 
1585  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1586 
1587  for (int i = 0; i < m->max_b_frames + 1; i++) {
1588  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1589 
1590  m->tmp_frames[i + 1]->pict_type = is_p ?
1592  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1593 
1594  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1595  if (out_size < 0) {
1596  ret = out_size;
1597  goto fail;
1598  }
1599 
1600  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1601  }
1602 
1603  /* get the delayed frames */
1605  if (out_size < 0) {
1606  ret = out_size;
1607  goto fail;
1608  }
1609  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1610 
1611  rd += c->error[0] + c->error[1] + c->error[2];
1612 
1613  if (rd < best_rd) {
1614  best_rd = rd;
1615  best_b_count = j;
1616  }
1617 
1618 fail:
1621  if (ret < 0) {
1622  best_b_count = ret;
1623  break;
1624  }
1625  }
1626 
1627  av_packet_free(&pkt);
1628 
1629  return best_b_count;
1630 }
1631 
1632 /**
1633  * Determines whether an input picture is discarded or not
1634  * and if not determines the length of the next chain of B frames
1635  * and moves these pictures (including the P frame) into
1636  * reordered_input_picture.
1637  * input_picture[0] is always NULL when exiting this function, even on error;
1638  * reordered_input_picture[0] is always NULL when exiting this function on error.
1639  */
1641 {
1642  MPVEncContext *const s = &m->s;
1643 
1644  /* Either nothing to do or can't do anything */
1645  if (m->reordered_input_picture[0] || !m->input_picture[0])
1646  return 0;
1647 
1648  /* set next picture type & ordering */
1649  if (m->frame_skip_threshold || m->frame_skip_factor) {
1650  if (m->picture_in_gop_number < m->gop_size &&
1651  s->c.next_pic.ptr &&
1652  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1653  // FIXME check that the gop check above is +-1 correct
1655 
1656  ff_vbv_update(m, 0);
1657 
1658  return 0;
1659  }
1660  }
1661 
1662  if (/* m->picture_in_gop_number >= m->gop_size || */
1663  !s->c.next_pic.ptr || m->intra_only) {
1664  m->reordered_input_picture[0] = m->input_picture[0];
1665  m->input_picture[0] = NULL;
1668  m->coded_picture_number++;
1669  } else {
1670  int b_frames = 0;
1671 
1672  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1673  for (int i = 0; i < m->max_b_frames + 1; i++) {
1674  int pict_num = m->input_picture[0]->display_picture_number + i;
1675 
1676  if (pict_num >= m->rc_context.num_entries)
1677  break;
1678  if (!m->input_picture[i]) {
1679  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1680  break;
1681  }
1682 
1683  m->input_picture[i]->f->pict_type =
1684  m->rc_context.entry[pict_num].new_pict_type;
1685  }
1686  }
1687 
1688  if (m->b_frame_strategy == 0) {
1689  b_frames = m->max_b_frames;
1690  while (b_frames && !m->input_picture[b_frames])
1691  b_frames--;
1692  } else if (m->b_frame_strategy == 1) {
1693  for (int i = 1; i < m->max_b_frames + 1; i++) {
1694  if (m->input_picture[i] &&
1695  m->input_picture[i]->b_frame_score == 0) {
1698  m->input_picture[i ]->f->data[0],
1699  m->input_picture[i - 1]->f->data[0],
1700  s->c.linesize) + 1;
1701  }
1702  }
1703  for (int i = 0;; i++) {
1704  if (i >= m->max_b_frames + 1 ||
1705  !m->input_picture[i] ||
1706  m->input_picture[i]->b_frame_score - 1 >
1707  s->c.mb_num / m->b_sensitivity) {
1708  b_frames = FFMAX(0, i - 1);
1709  break;
1710  }
1711  }
1712 
1713  /* reset scores */
1714  for (int i = 0; i < b_frames + 1; i++)
1715  m->input_picture[i]->b_frame_score = 0;
1716  } else if (m->b_frame_strategy == 2) {
1717  b_frames = estimate_best_b_count(m);
1718  if (b_frames < 0) {
1720  return b_frames;
1721  }
1722  }
1723 
1724  for (int i = b_frames - 1; i >= 0; i--) {
1725  int type = m->input_picture[i]->f->pict_type;
1726  if (type && type != AV_PICTURE_TYPE_B)
1727  b_frames = i;
1728  }
1729  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1730  b_frames == m->max_b_frames) {
1731  av_log(s->c.avctx, AV_LOG_ERROR,
1732  "warning, too many B-frames in a row\n");
1733  }
1734 
1735  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1736  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1737  m->gop_size > m->picture_in_gop_number) {
1738  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1739  } else {
1740  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1741  b_frames = 0;
1742  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1743  }
1744  }
1745 
1746  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1747  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1748  b_frames--;
1749 
1750  m->reordered_input_picture[0] = m->input_picture[b_frames];
1751  m->input_picture[b_frames] = NULL;
1755  m->coded_picture_number++;
1756  for (int i = 0; i < b_frames; i++) {
1757  m->reordered_input_picture[i + 1] = m->input_picture[i];
1758  m->input_picture[i] = NULL;
1759  m->reordered_input_picture[i + 1]->f->pict_type =
1762  m->coded_picture_number++;
1763  }
1764  }
1765 
1766  return 0;
1767 }
1768 
1770 {
1771  MPVEncContext *const s = &m->s;
1772  int ret;
1773 
1775 
1776  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1779 
1781  av_assert1(!m->input_picture[0]);
1782  if (ret < 0)
1783  return ret;
1784 
1785  av_frame_unref(s->new_pic);
1786 
1787  if (m->reordered_input_picture[0]) {
1790 
1791  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1792  // input is a shared pix, so we can't modify it -> allocate a new
1793  // one & ensure that the shared one is reusable
1794  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1795 
1796  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1797  if (ret < 0)
1798  goto fail;
1799  } else {
1800  // input is not a shared pix -> reuse buffer for current_pix
1801  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1802  if (ret < 0)
1803  goto fail;
1804  for (int i = 0; i < MPV_MAX_PLANES; i++)
1805  s->new_pic->data[i] += INPLACE_OFFSET;
1806  }
1807  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1808  m->reordered_input_picture[0] = NULL;
1809  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1810  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1811  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1812  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1813  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1814  if (ret < 0) {
1815  ff_mpv_unref_picture(&s->c.cur_pic);
1816  return ret;
1817  }
1818  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1819 
1820  }
1821  return 0;
1822 fail:
1824  return ret;
1825 }
1826 
1827 static void frame_end(MPVMainEncContext *const m)
1828 {
1829  MPVEncContext *const s = &m->s;
1830 
1831  if (s->me.unrestricted_mv &&
1832  s->c.cur_pic.reference &&
1833  !m->intra_only) {
1834  int hshift = s->c.chroma_x_shift;
1835  int vshift = s->c.chroma_y_shift;
1836  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1837  s->c.cur_pic.linesize[0],
1838  s->c.h_edge_pos, s->c.v_edge_pos,
1840  EDGE_TOP | EDGE_BOTTOM);
1841  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1842  s->c.cur_pic.linesize[1],
1843  s->c.h_edge_pos >> hshift,
1844  s->c.v_edge_pos >> vshift,
1845  EDGE_WIDTH >> hshift,
1846  EDGE_WIDTH >> vshift,
1847  EDGE_TOP | EDGE_BOTTOM);
1848  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1849  s->c.cur_pic.linesize[2],
1850  s->c.h_edge_pos >> hshift,
1851  s->c.v_edge_pos >> vshift,
1852  EDGE_WIDTH >> hshift,
1853  EDGE_WIDTH >> vshift,
1854  EDGE_TOP | EDGE_BOTTOM);
1855  }
1856 
1857  m->last_pict_type = s->c.pict_type;
1858  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1859  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1860  m->last_non_b_pict_type = s->c.pict_type;
1861 }
1862 
1864 {
1865  MPVEncContext *const s = &m->s;
1866  int intra, i;
1867 
1868  for (intra = 0; intra < 2; intra++) {
1869  if (s->dct_count[intra] > (1 << 16)) {
1870  for (i = 0; i < 64; i++) {
1871  s->dct_error_sum[intra][i] >>= 1;
1872  }
1873  s->dct_count[intra] >>= 1;
1874  }
1875 
1876  for (i = 0; i < 64; i++) {
1877  s->dct_offset[intra][i] = (m->noise_reduction *
1878  s->dct_count[intra] +
1879  s->dct_error_sum[intra][i] / 2) /
1880  (s->dct_error_sum[intra][i] + 1);
1881  }
1882  }
1883 }
1884 
1885 static void frame_start(MPVMainEncContext *const m)
1886 {
1887  MPVEncContext *const s = &m->s;
1888 
1889  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1890 
1891  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1892  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1893  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1894  }
1895 
1896  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1897  if (s->dct_error_sum) {
1899  }
1900 }
1901 
1903  const AVFrame *pic_arg, int *got_packet)
1904 {
1905  MPVMainEncContext *const m = avctx->priv_data;
1906  MPVEncContext *const s = &m->s;
1907  int stuffing_count, ret;
1908  int context_count = s->c.slice_context_count;
1909 
1910  ff_mpv_unref_picture(&s->c.cur_pic);
1911 
1912  m->vbv_ignore_qmax = 0;
1913 
1914  m->picture_in_gop_number++;
1915 
1916  ret = load_input_picture(m, pic_arg);
1917  if (ret < 0)
1918  return ret;
1919 
1921  if (ret < 0)
1922  return ret;
1923 
1924  /* output? */
1925  if (s->new_pic->data[0]) {
1926  int growing_buffer = context_count == 1 && !s->data_partitioning;
1927  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1928  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1929  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1930  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1931  if (ret < 0)
1932  return ret;
1933  }
1934  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1935  return ret;
1937  if (s->mb_info) {
1938  s->mb_info_ptr = av_packet_new_side_data(pkt,
1940  s->c.mb_width*s->c.mb_height*12);
1941  if (!s->mb_info_ptr)
1942  return AVERROR(ENOMEM);
1943  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1944  }
1945 
1946  s->c.pict_type = s->new_pic->pict_type;
1947  frame_start(m);
1948 vbv_retry:
1949  ret = encode_picture(m, pkt);
1950  if (growing_buffer) {
1951  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1952  pkt->data = s->pb.buf;
1953  pkt->size = avctx->internal->byte_buffer_size;
1954  }
1955  if (ret < 0)
1956  return -1;
1957 
1958  frame_end(m);
1959 
1960  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
1962 
1963  if (avctx->rc_buffer_size) {
1964  RateControlContext *rcc = &m->rc_context;
1965  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1966  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1967  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1968 
1969  if (put_bits_count(&s->pb) > max_size &&
1970  s->lambda < m->lmax) {
1971  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1972  (s->c.qscale + 1) / s->c.qscale);
1973  if (s->adaptive_quant) {
1974  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
1975  s->lambda_table[i] =
1976  FFMAX(s->lambda_table[i] + min_step,
1977  s->lambda_table[i] * (s->c.qscale + 1) /
1978  s->c.qscale);
1979  }
1980  s->c.mb_skipped = 0; // done in frame_start()
1981  // done in encode_picture() so we must undo it
1982  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
1983  s->c.no_rounding ^= s->flipflop_rounding;
1984  }
1985  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1986  s->c.time_base = s->c.last_time_base;
1987  s->c.last_non_b_time = s->c.time - s->c.pp_time;
1988  }
1989  m->vbv_ignore_qmax = 1;
1990  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1991  goto vbv_retry;
1992  }
1993 
1994  av_assert0(avctx->rc_max_rate);
1995  }
1996 
1997  if (avctx->flags & AV_CODEC_FLAG_PASS1)
1999 
2000  for (int i = 0; i < MPV_MAX_PLANES; i++)
2001  avctx->error[i] += s->encoding_error[i];
2002  ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
2003  s->encoding_error,
2004  (avctx->flags&AV_CODEC_FLAG_PSNR) ? MPV_MAX_PLANES : 0,
2005  s->c.pict_type);
2006 
2007  if (avctx->flags & AV_CODEC_FLAG_PASS1)
2008  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2009  s->misc_bits + s->i_tex_bits +
2010  s->p_tex_bits);
2011  flush_put_bits(&s->pb);
2012  m->frame_bits = put_bits_count(&s->pb);
2013 
2014  stuffing_count = ff_vbv_update(m, m->frame_bits);
2015  m->stuffing_bits = 8*stuffing_count;
2016  if (stuffing_count) {
2017  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2018  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2019  return -1;
2020  }
2021 
2022  switch (s->c.codec_id) {
2025  while (stuffing_count--) {
2026  put_bits(&s->pb, 8, 0);
2027  }
2028  break;
2029  case AV_CODEC_ID_MPEG4:
2030  put_bits(&s->pb, 16, 0);
2031  put_bits(&s->pb, 16, 0x1C3);
2032  stuffing_count -= 4;
2033  while (stuffing_count--) {
2034  put_bits(&s->pb, 8, 0xFF);
2035  }
2036  break;
2037  default:
2038  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2039  m->stuffing_bits = 0;
2040  }
2041  flush_put_bits(&s->pb);
2042  m->frame_bits = put_bits_count(&s->pb);
2043  }
2044 
2045  /* update MPEG-1/2 vbv_delay for CBR */
2046  if (avctx->rc_max_rate &&
2047  avctx->rc_min_rate == avctx->rc_max_rate &&
2048  s->c.out_format == FMT_MPEG1 &&
2049  90000LL * (avctx->rc_buffer_size - 1) <=
2050  avctx->rc_max_rate * 0xFFFFLL) {
2051  AVCPBProperties *props;
2052  size_t props_size;
2053 
2054  int vbv_delay, min_delay;
2055  double inbits = avctx->rc_max_rate *
2056  av_q2d(avctx->time_base);
2057  int minbits = m->frame_bits - 8 *
2058  (m->vbv_delay_pos - 1);
2059  double bits = m->rc_context.buffer_index + minbits - inbits;
2060  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2061 
2062  if (bits < 0)
2063  av_log(avctx, AV_LOG_ERROR,
2064  "Internal error, negative bits\n");
2065 
2066  av_assert1(s->c.repeat_first_field == 0);
2067 
2068  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2069  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2070  avctx->rc_max_rate;
2071 
2072  vbv_delay = FFMAX(vbv_delay, min_delay);
2073 
2074  av_assert0(vbv_delay < 0xFFFF);
2075 
2076  vbv_delay_ptr[0] &= 0xF8;
2077  vbv_delay_ptr[0] |= vbv_delay >> 13;
2078  vbv_delay_ptr[1] = vbv_delay >> 5;
2079  vbv_delay_ptr[2] &= 0x07;
2080  vbv_delay_ptr[2] |= vbv_delay << 3;
2081 
2082  props = av_cpb_properties_alloc(&props_size);
2083  if (!props)
2084  return AVERROR(ENOMEM);
2085  props->vbv_delay = vbv_delay * 300;
2086 
2088  (uint8_t*)props, props_size);
2089  if (ret < 0) {
2090  av_freep(&props);
2091  return ret;
2092  }
2093  }
2094  m->total_bits += m->frame_bits;
2095 
2096  pkt->pts = s->c.cur_pic.ptr->f->pts;
2097  pkt->duration = s->c.cur_pic.ptr->f->duration;
2098  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2099  if (!s->c.cur_pic.ptr->coded_picture_number)
2100  pkt->dts = pkt->pts - m->dts_delta;
2101  else
2102  pkt->dts = m->reordered_pts;
2103  m->reordered_pts = pkt->pts;
2104  } else
2105  pkt->dts = pkt->pts;
2106 
2107  // the no-delay case is handled in generic code
2108  if (avctx->codec->capabilities & AV_CODEC_CAP_DELAY) {
2109  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2110  if (ret < 0)
2111  return ret;
2112  }
2113 
2114  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2116  if (s->mb_info)
2118  } else {
2119  m->frame_bits = 0;
2120  }
2121 
2122  ff_mpv_unref_picture(&s->c.cur_pic);
2123 
2124  av_assert1((m->frame_bits & 7) == 0);
2125 
2126  pkt->size = m->frame_bits / 8;
2127  *got_packet = !!pkt->size;
2128  return 0;
2129 }
2130 
2132  int n, int threshold)
2133 {
2134  static const char tab[64] = {
2135  3, 2, 2, 1, 1, 1, 1, 1,
2136  1, 1, 1, 1, 1, 1, 1, 1,
2137  1, 1, 1, 1, 1, 1, 1, 1,
2138  0, 0, 0, 0, 0, 0, 0, 0,
2139  0, 0, 0, 0, 0, 0, 0, 0,
2140  0, 0, 0, 0, 0, 0, 0, 0,
2141  0, 0, 0, 0, 0, 0, 0, 0,
2142  0, 0, 0, 0, 0, 0, 0, 0
2143  };
2144  int score = 0;
2145  int run = 0;
2146  int i;
2147  int16_t *block = s->block[n];
2148  const int last_index = s->c.block_last_index[n];
2149  int skip_dc;
2150 
2151  if (threshold < 0) {
2152  skip_dc = 0;
2153  threshold = -threshold;
2154  } else
2155  skip_dc = 1;
2156 
2157  /* Are all we could set to zero already zero? */
2158  if (last_index <= skip_dc - 1)
2159  return;
2160 
2161  for (i = 0; i <= last_index; i++) {
2162  const int j = s->c.intra_scantable.permutated[i];
2163  const int level = FFABS(block[j]);
2164  if (level == 1) {
2165  if (skip_dc && i == 0)
2166  continue;
2167  score += tab[run];
2168  run = 0;
2169  } else if (level > 1) {
2170  return;
2171  } else {
2172  run++;
2173  }
2174  }
2175  if (score >= threshold)
2176  return;
2177  for (i = skip_dc; i <= last_index; i++) {
2178  const int j = s->c.intra_scantable.permutated[i];
2179  block[j] = 0;
2180  }
2181  if (block[0])
2182  s->c.block_last_index[n] = 0;
2183  else
2184  s->c.block_last_index[n] = -1;
2185 }
2186 
2187 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2188  int last_index)
2189 {
2190  int i;
2191  const int maxlevel = s->max_qcoeff;
2192  const int minlevel = s->min_qcoeff;
2193  int overflow = 0;
2194 
2195  if (s->c.mb_intra) {
2196  i = 1; // skip clipping of intra dc
2197  } else
2198  i = 0;
2199 
2200  for (; i <= last_index; i++) {
2201  const int j = s->c.intra_scantable.permutated[i];
2202  int level = block[j];
2203 
2204  if (level > maxlevel) {
2205  level = maxlevel;
2206  overflow++;
2207  } else if (level < minlevel) {
2208  level = minlevel;
2209  overflow++;
2210  }
2211 
2212  block[j] = level;
2213  }
2214 
2215  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2216  av_log(s->c.avctx, AV_LOG_INFO,
2217  "warning, clipping %d dct coefficients to %d..%d\n",
2218  overflow, minlevel, maxlevel);
2219 }
2220 
2221 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2222 {
2223  int x, y;
2224  // FIXME optimize
2225  for (y = 0; y < 8; y++) {
2226  for (x = 0; x < 8; x++) {
2227  int x2, y2;
2228  int sum = 0;
2229  int sqr = 0;
2230  int count = 0;
2231 
2232  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2233  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2234  int v = ptr[x2 + y2 * stride];
2235  sum += v;
2236  sqr += v * v;
2237  count++;
2238  }
2239  }
2240  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2241  }
2242  }
2243 }
2244 
2246  int motion_x, int motion_y,
2247  int mb_block_height,
2248  int mb_block_width,
2249  int mb_block_count,
2250  int chroma_x_shift,
2251  int chroma_y_shift,
2252  int chroma_format)
2253 {
2254 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2255  * and neither of these encoders currently supports 444. */
2256 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2257  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2258  DECLARE_ALIGNED(16, int16_t, weight)[12][64];
2259  int16_t orig[12][64];
2260  const int mb_x = s->c.mb_x;
2261  const int mb_y = s->c.mb_y;
2262  int i;
2263  int skip_dct[12];
2264  int dct_offset = s->c.linesize * 8; // default for progressive frames
2265  int uv_dct_offset = s->c.uvlinesize * 8;
2266  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2267  ptrdiff_t wrap_y, wrap_c;
2268 
2269  for (i = 0; i < mb_block_count; i++)
2270  skip_dct[i] = s->skipdct;
2271 
2272  if (s->adaptive_quant) {
2273  const int last_qp = s->c.qscale;
2274  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2275 
2276  s->lambda = s->lambda_table[mb_xy];
2277  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2279 
2280  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2281  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2282 
2283  if (s->c.out_format == FMT_H263) {
2284  s->dquant = av_clip(s->dquant, -2, 2);
2285 
2286  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2287  if (!s->c.mb_intra) {
2288  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2289  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2290  s->dquant = 0;
2291  }
2292  if (s->c.mv_type == MV_TYPE_8X8)
2293  s->dquant = 0;
2294  }
2295  }
2296  }
2297  }
2298  ff_set_qscale(&s->c, last_qp + s->dquant);
2299  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2300  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2301 
2302  wrap_y = s->c.linesize;
2303  wrap_c = s->c.uvlinesize;
2304  ptr_y = s->new_pic->data[0] +
2305  (mb_y * 16 * wrap_y) + mb_x * 16;
2306  ptr_cb = s->new_pic->data[1] +
2307  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2308  ptr_cr = s->new_pic->data[2] +
2309  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2310 
2311  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2312  s->c.codec_id != AV_CODEC_ID_AMV) {
2313  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2314  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2315  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2316  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2317  wrap_y, wrap_y,
2318  16, 16, mb_x * 16, mb_y * 16,
2319  s->c.width, s->c.height);
2320  ptr_y = ebuf;
2321  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2322  wrap_c, wrap_c,
2323  mb_block_width, mb_block_height,
2324  mb_x * mb_block_width, mb_y * mb_block_height,
2325  cw, ch);
2326  ptr_cb = ebuf + 16 * wrap_y;
2327  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2328  wrap_c, wrap_c,
2329  mb_block_width, mb_block_height,
2330  mb_x * mb_block_width, mb_y * mb_block_height,
2331  cw, ch);
2332  ptr_cr = ebuf + 16 * wrap_y + 16;
2333  }
2334 
2335  if (s->c.mb_intra) {
2336  if (INTERLACED_DCT(s)) {
2337  int progressive_score, interlaced_score;
2338 
2339  s->c.interlaced_dct = 0;
2340  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2341  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2342  NULL, wrap_y, 8) - 400;
2343 
2344  if (progressive_score > 0) {
2345  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2346  NULL, wrap_y * 2, 8) +
2347  s->ildct_cmp[1](s, ptr_y + wrap_y,
2348  NULL, wrap_y * 2, 8);
2349  if (progressive_score > interlaced_score) {
2350  s->c.interlaced_dct = 1;
2351 
2352  dct_offset = wrap_y;
2353  uv_dct_offset = wrap_c;
2354  wrap_y <<= 1;
2355  if (chroma_format == CHROMA_422 ||
2356  chroma_format == CHROMA_444)
2357  wrap_c <<= 1;
2358  }
2359  }
2360  }
2361 
2362  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2363  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2364  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2365  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2366 
2367  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2368  skip_dct[4] = 1;
2369  skip_dct[5] = 1;
2370  } else {
2371  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2372  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2373  if (chroma_format == CHROMA_422) {
2374  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2375  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2376  } else if (chroma_format == CHROMA_444) {
2377  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2378  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2379  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2380  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2381  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2382  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2383  }
2384  }
2385  } else {
2386  op_pixels_func (*op_pix)[4];
2387  qpel_mc_func (*op_qpix)[16];
2388  uint8_t *dest_y, *dest_cb, *dest_cr;
2389 
2390  dest_y = s->c.dest[0];
2391  dest_cb = s->c.dest[1];
2392  dest_cr = s->c.dest[2];
2393 
2394  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2395  op_pix = s->c.hdsp.put_pixels_tab;
2396  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2397  } else {
2398  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2399  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2400  }
2401 
2402  if (s->c.mv_dir & MV_DIR_FORWARD) {
2403  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2404  s->c.last_pic.data,
2405  op_pix, op_qpix);
2406  op_pix = s->c.hdsp.avg_pixels_tab;
2407  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2408  }
2409  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2410  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2411  s->c.next_pic.data,
2412  op_pix, op_qpix);
2413  }
2414 
2415  if (INTERLACED_DCT(s)) {
2416  int progressive_score, interlaced_score;
2417 
2418  s->c.interlaced_dct = 0;
2419  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2420  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2421  ptr_y + wrap_y * 8,
2422  wrap_y, 8) - 400;
2423 
2424  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2425  progressive_score -= 400;
2426 
2427  if (progressive_score > 0) {
2428  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2429  wrap_y * 2, 8) +
2430  s->ildct_cmp[0](s, dest_y + wrap_y,
2431  ptr_y + wrap_y,
2432  wrap_y * 2, 8);
2433 
2434  if (progressive_score > interlaced_score) {
2435  s->c.interlaced_dct = 1;
2436 
2437  dct_offset = wrap_y;
2438  uv_dct_offset = wrap_c;
2439  wrap_y <<= 1;
2440  if (chroma_format == CHROMA_422)
2441  wrap_c <<= 1;
2442  }
2443  }
2444  }
2445 
2446  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2447  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2448  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2449  dest_y + dct_offset, wrap_y);
2450  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2451  dest_y + dct_offset + 8, wrap_y);
2452 
2453  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2454  skip_dct[4] = 1;
2455  skip_dct[5] = 1;
2456  } else {
2457  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2458  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2459  if (!chroma_y_shift) { /* 422 */
2460  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2461  dest_cb + uv_dct_offset, wrap_c);
2462  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2463  dest_cr + uv_dct_offset, wrap_c);
2464  }
2465  }
2466  /* pre quantization */
2467  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2468  // FIXME optimize
2469  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2470  skip_dct[0] = 1;
2471  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2472  skip_dct[1] = 1;
2473  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2474  wrap_y, 8) < 20 * s->c.qscale)
2475  skip_dct[2] = 1;
2476  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2477  wrap_y, 8) < 20 * s->c.qscale)
2478  skip_dct[3] = 1;
2479  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2480  skip_dct[4] = 1;
2481  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2482  skip_dct[5] = 1;
2483  if (!chroma_y_shift) { /* 422 */
2484  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2485  dest_cb + uv_dct_offset,
2486  wrap_c, 8) < 20 * s->c.qscale)
2487  skip_dct[6] = 1;
2488  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2489  dest_cr + uv_dct_offset,
2490  wrap_c, 8) < 20 * s->c.qscale)
2491  skip_dct[7] = 1;
2492  }
2493  }
2494  }
2495 
2496  if (s->quantizer_noise_shaping) {
2497  if (!skip_dct[0])
2498  get_visual_weight(weight[0], ptr_y , wrap_y);
2499  if (!skip_dct[1])
2500  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2501  if (!skip_dct[2])
2502  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2503  if (!skip_dct[3])
2504  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2505  if (!skip_dct[4])
2506  get_visual_weight(weight[4], ptr_cb , wrap_c);
2507  if (!skip_dct[5])
2508  get_visual_weight(weight[5], ptr_cr , wrap_c);
2509  if (!chroma_y_shift) { /* 422 */
2510  if (!skip_dct[6])
2511  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2512  wrap_c);
2513  if (!skip_dct[7])
2514  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2515  wrap_c);
2516  }
2517  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2518  }
2519 
2520  /* DCT & quantize */
2521  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2522  {
2523  for (i = 0; i < mb_block_count; i++) {
2524  if (!skip_dct[i]) {
2525  int overflow;
2526  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2527  // FIXME we could decide to change to quantizer instead of
2528  // clipping
2529  // JS: I don't think that would be a good idea it could lower
2530  // quality instead of improve it. Just INTRADC clipping
2531  // deserves changes in quantizer
2532  if (overflow)
2533  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2534  } else
2535  s->c.block_last_index[i] = -1;
2536  }
2537  if (s->quantizer_noise_shaping) {
2538  for (i = 0; i < mb_block_count; i++) {
2539  if (!skip_dct[i]) {
2540  s->c.block_last_index[i] =
2541  dct_quantize_refine(s, s->block[i], weight[i],
2542  orig[i], i, s->c.qscale);
2543  }
2544  }
2545  }
2546 
2547  if (s->luma_elim_threshold && !s->c.mb_intra)
2548  for (i = 0; i < 4; i++)
2549  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2550  if (s->chroma_elim_threshold && !s->c.mb_intra)
2551  for (i = 4; i < mb_block_count; i++)
2552  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2553 
2554  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2555  for (i = 0; i < mb_block_count; i++) {
2556  if (s->c.block_last_index[i] == -1)
2557  s->coded_score[i] = INT_MAX / 256;
2558  }
2559  }
2560  }
2561 
2562  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2563  s->c.block_last_index[4] =
2564  s->c.block_last_index[5] = 0;
2565  s->block[4][0] =
2566  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2567  if (!chroma_y_shift) { /* 422 / 444 */
2568  for (i=6; i<12; i++) {
2569  s->c.block_last_index[i] = 0;
2570  s->block[i][0] = s->block[4][0];
2571  }
2572  }
2573  }
2574 
2575  // non c quantize code returns incorrect block_last_index FIXME
2576  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2577  for (i = 0; i < mb_block_count; i++) {
2578  int j;
2579  if (s->c.block_last_index[i] > 0) {
2580  for (j = 63; j > 0; j--) {
2581  if (s->block[i][s->c.intra_scantable.permutated[j]])
2582  break;
2583  }
2584  s->c.block_last_index[i] = j;
2585  }
2586  }
2587  }
2588 
2589  s->encode_mb(s, s->block, motion_x, motion_y);
2590 }
2591 
2592 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2593 {
2594  if (s->c.chroma_format == CHROMA_420)
2595  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2596  else if (s->c.chroma_format == CHROMA_422)
2597  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2598  else
2599  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2600 }
2601 
2602 typedef struct MBBackup {
2603  struct {
2604  int mv[2][4][2];
2605  int last_mv[2][2][2];
2608  int qscale;
2611  } c;
2613  int last_dc[3];
2615  int dquant;
2617  int16_t (*block)[64];
2619 } MBBackup;
2620 
2621 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2622 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2623  const SRC_TYPE *const s) \
2624 { \
2625  /* FIXME is memcpy faster than a loop? */ \
2626  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2627  \
2628  /* MPEG-1 */ \
2629  d->mb_skip_run = s->mb_skip_run; \
2630  for (int i = 0; i < 3; i++) \
2631  d->last_dc[i] = s->last_dc[i]; \
2632  \
2633  /* statistics */ \
2634  d->mv_bits = s->mv_bits; \
2635  d->i_tex_bits = s->i_tex_bits; \
2636  d->p_tex_bits = s->p_tex_bits; \
2637  d->i_count = s->i_count; \
2638  d->misc_bits = s->misc_bits; \
2639  d->last_bits = 0; \
2640  \
2641  d->c.mb_skipped = 0; \
2642  d->c.qscale = s->c.qscale; \
2643  d->dquant = s->dquant; \
2644  \
2645  d->esc3_level_length = s->esc3_level_length; \
2646 } \
2647  \
2648 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2649  const SRC_TYPE *const s, \
2650  int data_partitioning) \
2651 { \
2652  /* FIXME is memcpy faster than a loop? */ \
2653  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2654  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2655  \
2656  /* MPEG-1 */ \
2657  d->mb_skip_run = s->mb_skip_run; \
2658  for (int i = 0; i < 3; i++) \
2659  d->last_dc[i] = s->last_dc[i]; \
2660  \
2661  /* statistics */ \
2662  d->mv_bits = s->mv_bits; \
2663  d->i_tex_bits = s->i_tex_bits; \
2664  d->p_tex_bits = s->p_tex_bits; \
2665  d->i_count = s->i_count; \
2666  d->misc_bits = s->misc_bits; \
2667  \
2668  d->c.mb_intra = s->c.mb_intra; \
2669  d->c.mb_skipped = s->c.mb_skipped; \
2670  d->c.mv_type = s->c.mv_type; \
2671  d->c.mv_dir = s->c.mv_dir; \
2672  d->pb = s->pb; \
2673  if (data_partitioning) { \
2674  d->pb2 = s->pb2; \
2675  d->tex_pb = s->tex_pb; \
2676  } \
2677  d->block = s->block; \
2678  for (int i = 0; i < 8; i++) \
2679  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2680  d->c.interlaced_dct = s->c.interlaced_dct; \
2681  d->c.qscale = s->c.qscale; \
2682  \
2683  d->esc3_level_length = s->esc3_level_length; \
2684 }
2685 
2686 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2687 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2688 
2689 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2690  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2691  int *dmin, int *next_block, int motion_x, int motion_y)
2692 {
2693  int score;
2694  uint8_t *dest_backup[3];
2695 
2696  reset_context_before_encode(s, backup);
2697 
2698  s->block = s->blocks[*next_block];
2699  s->pb = pb[*next_block];
2700  if (s->data_partitioning) {
2701  s->pb2 = pb2 [*next_block];
2702  s->tex_pb= tex_pb[*next_block];
2703  }
2704 
2705  if(*next_block){
2706  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2707  s->c.dest[0] = s->c.sc.rd_scratchpad;
2708  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2709  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2710  av_assert0(s->c.linesize >= 32); //FIXME
2711  }
2712 
2713  encode_mb(s, motion_x, motion_y);
2714 
2715  score= put_bits_count(&s->pb);
2716  if (s->data_partitioning) {
2717  score+= put_bits_count(&s->pb2);
2718  score+= put_bits_count(&s->tex_pb);
2719  }
2720 
2721  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2722  mpv_reconstruct_mb(s, s->block);
2723 
2724  score *= s->lambda2;
2725  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2726  }
2727 
2728  if(*next_block){
2729  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2730  }
2731 
2732  if(score<*dmin){
2733  *dmin= score;
2734  *next_block^=1;
2735 
2736  save_context_after_encode(best, s, s->data_partitioning);
2737  }
2738 }
2739 
2740 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2741 {
2742  const uint32_t *sq = ff_square_tab + 256;
2743  int acc=0;
2744  int x,y;
2745 
2746  if(w==16 && h==16)
2747  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2748  else if(w==8 && h==8)
2749  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2750 
2751  for(y=0; y<h; y++){
2752  for(x=0; x<w; x++){
2753  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2754  }
2755  }
2756 
2757  av_assert2(acc>=0);
2758 
2759  return acc;
2760 }
2761 
2762 static int sse_mb(MPVEncContext *const s)
2763 {
2764  int w= 16;
2765  int h= 16;
2766  int chroma_mb_w = w >> s->c.chroma_x_shift;
2767  int chroma_mb_h = h >> s->c.chroma_y_shift;
2768 
2769  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2770  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2771 
2772  if(w==16 && h==16)
2773  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2774  s->c.dest[0], s->c.linesize, 16) +
2775  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2776  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2777  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2778  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2779  else
2780  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2781  s->c.dest[0], w, h, s->c.linesize) +
2782  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2783  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2784  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2785  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2786 }
2787 
2789  MPVEncContext *const s = *(void**)arg;
2790 
2791 
2792  s->me.pre_pass = 1;
2793  s->me.dia_size = s->c.avctx->pre_dia_size;
2794  s->c.first_slice_line = 1;
2795  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2796  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2797  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2798  s->c.first_slice_line = 0;
2799  }
2800 
2801  s->me.pre_pass = 0;
2802 
2803  return 0;
2804 }
2805 
2807  MPVEncContext *const s = *(void**)arg;
2808 
2809  s->me.dia_size = s->c.avctx->dia_size;
2810  s->c.first_slice_line = 1;
2811  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2812  s->c.mb_x = 0; //for block init below
2813  ff_init_block_index(&s->c);
2814  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
2815  s->c.block_index[0] += 2;
2816  s->c.block_index[1] += 2;
2817  s->c.block_index[2] += 2;
2818  s->c.block_index[3] += 2;
2819 
2820  /* compute motion vector & mb_type and store in context */
2821  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2822  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2823  else
2824  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2825  }
2826  s->c.first_slice_line = 0;
2827  }
2828  return 0;
2829 }
2830 
2831 static int mb_var_thread(AVCodecContext *c, void *arg){
2832  MPVEncContext *const s = *(void**)arg;
2833 
2834  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2835  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2836  int xx = mb_x * 16;
2837  int yy = mb_y * 16;
2838  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2839  int varc;
2840  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2841 
2842  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2843  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2844 
2845  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2846  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2847  s->me.mb_var_sum_temp += varc;
2848  }
2849  }
2850  return 0;
2851 }
2852 
2853 static void write_slice_end(MPVEncContext *const s)
2854 {
2855  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2856  if (s->partitioned_frame)
2858 
2859  ff_mpeg4_stuffing(&s->pb);
2860  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2861  s->c.out_format == FMT_MJPEG) {
2863  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2865  }
2866 
2867  flush_put_bits(&s->pb);
2868 
2869  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2870  s->misc_bits+= get_bits_diff(s);
2871 }
2872 
2873 static void write_mb_info(MPVEncContext *const s)
2874 {
2875  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2876  int offset = put_bits_count(&s->pb);
2877  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2878  int gobn = s->c.mb_y / s->gob_index;
2879  int pred_x, pred_y;
2880  if (CONFIG_H263_ENCODER)
2881  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2882  bytestream_put_le32(&ptr, offset);
2883  bytestream_put_byte(&ptr, s->c.qscale);
2884  bytestream_put_byte(&ptr, gobn);
2885  bytestream_put_le16(&ptr, mba);
2886  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2887  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2888  /* 4MV not implemented */
2889  bytestream_put_byte(&ptr, 0); /* hmv2 */
2890  bytestream_put_byte(&ptr, 0); /* vmv2 */
2891 }
2892 
2893 static void update_mb_info(MPVEncContext *const s)
2894 {
2895  if (!s->mb_info)
2896  return;
2897  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2898  s->mb_info_size += 12;
2899  s->prev_mb_info = s->last_mb_info;
2900  }
2901 
2902  s->last_mb_info = put_bytes_count(&s->pb, 0);
2903  if (!s->mb_info_size)
2904  s->mb_info_size += 12;
2905  write_mb_info(s);
2906 }
2907 
2908 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2909 {
2910  if (put_bytes_left(&s->pb, 0) < threshold
2911  && s->c.slice_context_count == 1
2912  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2913  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2914 
2915  uint8_t *new_buffer = NULL;
2916  int new_buffer_size = 0;
2917 
2918  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2919  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2920  return AVERROR(ENOMEM);
2921  }
2922 
2923  emms_c();
2924 
2925  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2926  s->c.avctx->internal->byte_buffer_size + size_increase);
2927  if (!new_buffer)
2928  return AVERROR(ENOMEM);
2929 
2930  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2931  av_free(s->c.avctx->internal->byte_buffer);
2932  s->c.avctx->internal->byte_buffer = new_buffer;
2933  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2934  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2935  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2936  }
2937  if (put_bytes_left(&s->pb, 0) < threshold)
2938  return AVERROR(EINVAL);
2939  return 0;
2940 }
2941 
2942 static int encode_thread(AVCodecContext *c, void *arg){
2943  MPVEncContext *const s = *(void**)arg;
2944  int chr_h = 16 >> s->c.chroma_y_shift;
2945  int i;
2946  MBBackup best_s = { 0 }, backup_s;
2947  uint8_t bit_buf[2][MAX_MB_BYTES];
2948  // + 2 because ff_copy_bits() overreads
2949  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2950  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2951  PutBitContext pb[2], pb2[2], tex_pb[2];
2952 
2953  for(i=0; i<2; i++){
2954  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2955  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
2956  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
2957  }
2958 
2959  s->last_bits= put_bits_count(&s->pb);
2960  s->mv_bits=0;
2961  s->misc_bits=0;
2962  s->i_tex_bits=0;
2963  s->p_tex_bits=0;
2964  s->i_count=0;
2965 
2966  for(i=0; i<3; i++){
2967  /* init last dc values */
2968  /* note: quant matrix value (8) is implied here */
2969  s->last_dc[i] = 128 << s->c.intra_dc_precision;
2970 
2971  s->encoding_error[i] = 0;
2972  }
2973  if (s->c.codec_id == AV_CODEC_ID_AMV) {
2974  s->last_dc[0] = 128 * 8 / 13;
2975  s->last_dc[1] = 128 * 8 / 14;
2976  s->last_dc[2] = 128 * 8 / 14;
2977 #if CONFIG_MPEG4_ENCODER
2978  } else if (s->partitioned_frame) {
2979  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
2981 #endif
2982  }
2983  s->mb_skip_run = 0;
2984  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
2985 
2986  s->last_mv_dir = 0;
2987 
2988  s->c.resync_mb_x = 0;
2989  s->c.resync_mb_y = 0;
2990  s->c.first_slice_line = 1;
2991  s->ptr_lastgob = s->pb.buf;
2992  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
2993  int mb_y;
2994  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
2995  int first_in_slice;
2996  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
2997  if (first_in_slice && mb_y_order != s->c.start_mb_y)
2999  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024;
3000  } else {
3001  mb_y = mb_y_order;
3002  }
3003  s->c.mb_x = 0;
3004  s->c.mb_y = mb_y;
3005 
3006  ff_set_qscale(&s->c, s->c.qscale);
3007  ff_init_block_index(&s->c);
3008 
3009  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3010  int mb_type, xy;
3011 // int d;
3012  int dmin= INT_MAX;
3013  int dir;
3014  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3015  + s->c.mb_width*MAX_MB_BYTES;
3016 
3018  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3019  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3020  return -1;
3021  }
3022  if (s->data_partitioning) {
3023  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3024  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3025  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3026  return -1;
3027  }
3028  }
3029 
3030  s->c.mb_x = mb_x;
3031  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3032  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3033 
3034  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3036  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3037  mb_type = s->mb_type[xy];
3038 
3039  /* write gob / video packet header */
3040  if(s->rtp_mode){
3041  int current_packet_size, is_gob_start;
3042 
3043  current_packet_size = put_bytes_count(&s->pb, 1)
3044  - (s->ptr_lastgob - s->pb.buf);
3045 
3046  is_gob_start = s->rtp_payload_size &&
3047  current_packet_size >= s->rtp_payload_size &&
3048  mb_y + mb_x > 0;
3049 
3050  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3051 
3052  switch (s->c.codec_id) {
3053  case AV_CODEC_ID_H263:
3054  case AV_CODEC_ID_H263P:
3055  if (!s->h263_slice_structured)
3056  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3057  break;
3059  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3061  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3062  s->mb_skip_run)
3063  is_gob_start=0;
3064  break;
3065  case AV_CODEC_ID_MJPEG:
3066  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3067  break;
3068  }
3069 
3070  if(is_gob_start){
3071  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3072  write_slice_end(s);
3073 
3074  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3076  }
3077 
3078  av_assert2((put_bits_count(&s->pb)&7) == 0);
3079  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3080 
3081  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3082  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3083  int d = 100 / s->error_rate;
3084  if(r % d == 0){
3085  current_packet_size=0;
3086  s->pb.buf_ptr= s->ptr_lastgob;
3087  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3088  }
3089  }
3090 
3091  switch (s->c.codec_id) {
3092  case AV_CODEC_ID_MPEG4:
3093  if (CONFIG_MPEG4_ENCODER) {
3097  }
3098  break;
3101  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3104  }
3105  break;
3106 #if CONFIG_H263P_ENCODER
3107  case AV_CODEC_ID_H263P:
3108  if (s->c.dc_val)
3110  // fallthrough
3111 #endif
3112  case AV_CODEC_ID_H263:
3113  if (CONFIG_H263_ENCODER) {
3114  if (s->mb_info && put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info)
3115  s->mb_info_size += 12;
3116 
3118  s->prev_mb_info = put_bits_count(&s->pb)/8;
3119  }
3120  break;
3121  }
3122 
3123  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3124  int bits= put_bits_count(&s->pb);
3125  s->misc_bits+= bits - s->last_bits;
3126  s->last_bits= bits;
3127  }
3128 
3129  s->ptr_lastgob += current_packet_size;
3130  s->c.first_slice_line = 1;
3131  s->c.resync_mb_x = mb_x;
3132  s->c.resync_mb_y = mb_y;
3133  }
3134  }
3135 
3136  if (s->c.resync_mb_x == s->c.mb_x &&
3137  s->c.resync_mb_y+1 == s->c.mb_y)
3138  s->c.first_slice_line = 0;
3139 
3140  s->c.mb_skipped = 0;
3141  s->dquant=0; //only for QP_RD
3142 
3143  update_mb_info(s);
3144 
3145  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3146  int next_block=0;
3147  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3148 
3149  backup_context_before_encode(&backup_s, s);
3150  backup_s.pb= s->pb;
3151  if (s->data_partitioning) {
3152  backup_s.pb2= s->pb2;
3153  backup_s.tex_pb= s->tex_pb;
3154  }
3155 
3156  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3157  s->c.mv_dir = MV_DIR_FORWARD;
3158  s->c.mv_type = MV_TYPE_16X16;
3159  s->c.mb_intra = 0;
3160  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3161  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3162  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3163  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3164  }
3165  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3166  s->c.mv_dir = MV_DIR_FORWARD;
3167  s->c.mv_type = MV_TYPE_FIELD;
3168  s->c.mb_intra = 0;
3169  for(i=0; i<2; i++){
3170  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3171  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3172  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3173  }
3174  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3175  &dmin, &next_block, 0, 0);
3176  }
3177  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3178  s->c.mv_dir = MV_DIR_FORWARD;
3179  s->c.mv_type = MV_TYPE_16X16;
3180  s->c.mb_intra = 0;
3181  s->c.mv[0][0][0] = 0;
3182  s->c.mv[0][0][1] = 0;
3183  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3184  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3185  }
3186  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3187  s->c.mv_dir = MV_DIR_FORWARD;
3188  s->c.mv_type = MV_TYPE_8X8;
3189  s->c.mb_intra = 0;
3190  for(i=0; i<4; i++){
3191  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3192  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3193  }
3194  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3195  &dmin, &next_block, 0, 0);
3196  }
3197  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3198  s->c.mv_dir = MV_DIR_FORWARD;
3199  s->c.mv_type = MV_TYPE_16X16;
3200  s->c.mb_intra = 0;
3201  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3202  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3203  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3204  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3205  }
3206  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3207  s->c.mv_dir = MV_DIR_BACKWARD;
3208  s->c.mv_type = MV_TYPE_16X16;
3209  s->c.mb_intra = 0;
3210  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3211  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3212  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3213  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3214  }
3215  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3216  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3217  s->c.mv_type = MV_TYPE_16X16;
3218  s->c.mb_intra = 0;
3219  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3220  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3221  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3222  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3223  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3224  &dmin, &next_block, 0, 0);
3225  }
3226  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3227  s->c.mv_dir = MV_DIR_FORWARD;
3228  s->c.mv_type = MV_TYPE_FIELD;
3229  s->c.mb_intra = 0;
3230  for(i=0; i<2; i++){
3231  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3232  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3233  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3234  }
3235  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3236  &dmin, &next_block, 0, 0);
3237  }
3238  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3239  s->c.mv_dir = MV_DIR_BACKWARD;
3240  s->c.mv_type = MV_TYPE_FIELD;
3241  s->c.mb_intra = 0;
3242  for(i=0; i<2; i++){
3243  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3244  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3245  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3246  }
3247  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3248  &dmin, &next_block, 0, 0);
3249  }
3250  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3251  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3252  s->c.mv_type = MV_TYPE_FIELD;
3253  s->c.mb_intra = 0;
3254  for(dir=0; dir<2; dir++){
3255  for(i=0; i<2; i++){
3256  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3257  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3258  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3259  }
3260  }
3261  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3262  &dmin, &next_block, 0, 0);
3263  }
3264  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3265  s->c.mv_dir = 0;
3266  s->c.mv_type = MV_TYPE_16X16;
3267  s->c.mb_intra = 1;
3268  s->c.mv[0][0][0] = 0;
3269  s->c.mv[0][0][1] = 0;
3270  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3271  &dmin, &next_block, 0, 0);
3272  s->c.mbintra_table[xy] = 1;
3273  }
3274 
3275  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3276  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3277  const int last_qp = backup_s.c.qscale;
3278  int qpi, qp, dc[6];
3279  int16_t ac[6][16];
3280  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3281  static const int dquant_tab[4]={-1,1,-2,2};
3282  int storecoefs = s->c.mb_intra && s->c.dc_val;
3283 
3284  av_assert2(backup_s.dquant == 0);
3285 
3286  //FIXME intra
3287  s->c.mv_dir = best_s.c.mv_dir;
3288  s->c.mv_type = MV_TYPE_16X16;
3289  s->c.mb_intra = best_s.c.mb_intra;
3290  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3291  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3292  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3293  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3294 
3295  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3296  for(; qpi<4; qpi++){
3297  int dquant= dquant_tab[qpi];
3298  qp= last_qp + dquant;
3299  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3300  continue;
3301  backup_s.dquant= dquant;
3302  if(storecoefs){
3303  for(i=0; i<6; i++){
3304  dc[i] = s->c.dc_val[s->c.block_index[i]];
3305  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3306  }
3307  }
3308 
3309  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3310  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3311  if (best_s.c.qscale != qp) {
3312  if(storecoefs){
3313  for(i=0; i<6; i++){
3314  s->c.dc_val[s->c.block_index[i]] = dc[i];
3315  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3316  }
3317  }
3318  }
3319  }
3320  }
3321  }
3322  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3323  int mx= s->b_direct_mv_table[xy][0];
3324  int my= s->b_direct_mv_table[xy][1];
3325 
3326  backup_s.dquant = 0;
3327  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3328  s->c.mb_intra = 0;
3329  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3330  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3331  &dmin, &next_block, mx, my);
3332  }
3333  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3334  backup_s.dquant = 0;
3335  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3336  s->c.mb_intra = 0;
3337  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3338  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3339  &dmin, &next_block, 0, 0);
3340  }
3341  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3342  int coded=0;
3343  for(i=0; i<6; i++)
3344  coded |= s->c.block_last_index[i];
3345  if(coded){
3346  int mx,my;
3347  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3348  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3349  mx=my=0; //FIXME find the one we actually used
3350  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3351  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3352  mx = s->c.mv[1][0][0];
3353  my = s->c.mv[1][0][1];
3354  }else{
3355  mx = s->c.mv[0][0][0];
3356  my = s->c.mv[0][0][1];
3357  }
3358 
3359  s->c.mv_dir = best_s.c.mv_dir;
3360  s->c.mv_type = best_s.c.mv_type;
3361  s->c.mb_intra = 0;
3362 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3363  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3364  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3365  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3366  backup_s.dquant= 0;
3367  s->skipdct=1;
3368  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3369  &dmin, &next_block, mx, my);
3370  s->skipdct=0;
3371  }
3372  }
3373 
3374  store_context_after_encode(s, &best_s, s->data_partitioning);
3375 
3376  pb_bits_count= put_bits_count(&s->pb);
3377  flush_put_bits(&s->pb);
3378  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3379  s->pb= backup_s.pb;
3380 
3381  if (s->data_partitioning) {
3382  pb2_bits_count= put_bits_count(&s->pb2);
3383  flush_put_bits(&s->pb2);
3384  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3385  s->pb2= backup_s.pb2;
3386 
3387  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3388  flush_put_bits(&s->tex_pb);
3389  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3390  s->tex_pb= backup_s.tex_pb;
3391  }
3392  s->last_bits= put_bits_count(&s->pb);
3393 
3394  if (CONFIG_H263_ENCODER &&
3395  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3397 
3398  if(next_block==0){ //FIXME 16 vs linesize16
3399  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3400  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3401  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3402  }
3403 
3404  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3405  mpv_reconstruct_mb(s, s->block);
3406  } else {
3407  int motion_x = 0, motion_y = 0;
3408  s->c.mv_type = MV_TYPE_16X16;
3409  // only one MB-Type possible
3410 
3411  switch(mb_type){
3413  s->c.mv_dir = 0;
3414  s->c.mb_intra = 1;
3415  motion_x= s->c.mv[0][0][0] = 0;
3416  motion_y= s->c.mv[0][0][1] = 0;
3417  s->c.mbintra_table[xy] = 1;
3418  break;
3420  s->c.mv_dir = MV_DIR_FORWARD;
3421  s->c.mb_intra = 0;
3422  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3423  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3424  break;
3426  s->c.mv_dir = MV_DIR_FORWARD;
3427  s->c.mv_type = MV_TYPE_FIELD;
3428  s->c.mb_intra = 0;
3429  for(i=0; i<2; i++){
3430  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3431  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3432  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3433  }
3434  break;
3436  s->c.mv_dir = MV_DIR_FORWARD;
3437  s->c.mv_type = MV_TYPE_8X8;
3438  s->c.mb_intra = 0;
3439  for(i=0; i<4; i++){
3440  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3441  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3442  }
3443  break;
3445  if (CONFIG_MPEG4_ENCODER) {
3447  s->c.mb_intra = 0;
3448  motion_x=s->b_direct_mv_table[xy][0];
3449  motion_y=s->b_direct_mv_table[xy][1];
3450  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3451  }
3452  break;
3454  if (CONFIG_MPEG4_ENCODER) {
3456  s->c.mb_intra = 0;
3457  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3458  }
3459  break;
3461  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3462  s->c.mb_intra = 0;
3463  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3464  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3465  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3466  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3467  break;
3469  s->c.mv_dir = MV_DIR_BACKWARD;
3470  s->c.mb_intra = 0;
3471  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3472  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3473  break;
3475  s->c.mv_dir = MV_DIR_FORWARD;
3476  s->c.mb_intra = 0;
3477  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3478  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3479  break;
3481  s->c.mv_dir = MV_DIR_FORWARD;
3482  s->c.mv_type = MV_TYPE_FIELD;
3483  s->c.mb_intra = 0;
3484  for(i=0; i<2; i++){
3485  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3486  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3487  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3488  }
3489  break;
3491  s->c.mv_dir = MV_DIR_BACKWARD;
3492  s->c.mv_type = MV_TYPE_FIELD;
3493  s->c.mb_intra = 0;
3494  for(i=0; i<2; i++){
3495  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3496  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3497  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3498  }
3499  break;
3501  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3502  s->c.mv_type = MV_TYPE_FIELD;
3503  s->c.mb_intra = 0;
3504  for(dir=0; dir<2; dir++){
3505  for(i=0; i<2; i++){
3506  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3507  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3508  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3509  }
3510  }
3511  break;
3512  default:
3513  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3514  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3515  "the only candidate (always coupled with INTER) "
3516  "so that it never reaches this switch");
3517  }
3518 
3519  encode_mb(s, motion_x, motion_y);
3520 
3521  // RAL: Update last macroblock type
3522  s->last_mv_dir = s->c.mv_dir;
3523 
3524  if (CONFIG_H263_ENCODER &&
3525  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3527 
3528  mpv_reconstruct_mb(s, s->block);
3529  }
3530 
3531  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3532 
3533  /* clean the MV table in IPS frames for direct mode in B-frames */
3534  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3535  s->p_mv_table[xy][0]=0;
3536  s->p_mv_table[xy][1]=0;
3537 #if CONFIG_H263_ENCODER
3538  } else if (s->c.h263_pred || s->c.h263_aic) {
3540 #endif
3541  }
3542 
3543  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3544  int w= 16;
3545  int h= 16;
3546 
3547  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3548  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3549 
3550  s->encoding_error[0] += sse(
3551  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3552  s->c.dest[0], w, h, s->c.linesize);
3553  s->encoding_error[1] += sse(
3554  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3555  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3556  s->encoding_error[2] += sse(
3557  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3558  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3559  }
3560  if (s->loop_filter) {
3561  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3562  ff_h263_loop_filter(&s->c);
3563  }
3564  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3565  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3566  }
3567  }
3568 
3569 #if CONFIG_MSMPEG4ENC
3570  //not beautiful here but we must write it before flushing so it has to be here
3571  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3572  s->c.pict_type == AV_PICTURE_TYPE_I)
3574 #endif
3575 
3576  write_slice_end(s);
3577 
3578  return 0;
3579 }
3580 
3581 #define ADD(field) dst->field += src->field;
3582 #define MERGE(field) dst->field += src->field; src->field=0
3584 {
 /* Accumulate per-slice motion-estimation statistics from src into the
  * main context dst after the ME pass; ADD(field) expands to
  * dst->field += src->field.
  * NOTE(review): the function signature line is elided in this excerpt. */
3585  ADD(me.scene_change_score);
3586  ADD(me.mc_mb_var_sum_temp);
3587  ADD(me.mb_var_sum_temp);
3588 }
3589 
3591 {
 /* Fold a slice context (src) back into the main context (dst) once its
  * slice has been encoded: accumulate bit/error statistics, optionally the
  * DCT denoising error sums, and append src's byte-aligned bitstream onto
  * dst's. MERGE() additionally zeroes src->field after adding it.
  * NOTE(review): the function signature line is elided in this excerpt. */
3592  int i;
3593 
3594  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3595  MERGE(dct_count[1]);
3596  ADD(mv_bits);
3597  ADD(i_tex_bits);
3598  ADD(p_tex_bits);
3599  ADD(i_count);
3600  ADD(misc_bits);
3601  ADD(encoding_error[0]);
3602  ADD(encoding_error[1]);
3603  ADD(encoding_error[2]);
3604 
 /* DCT error sums exist only when coefficient denoising is enabled. */
3605  if (dst->dct_error_sum) {
3606  for(i=0; i<64; i++){
3607  MERGE(dct_error_sum[0][i]);
3608  MERGE(dct_error_sum[1][i]);
3609  }
3610  }
3611 
 /* Both bitstreams must be byte-aligned before concatenation. */
3612  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3613  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3614  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3615  flush_put_bits(&dst->pb);
3616 }
3617 
/**
 * Select the quantizer / lambda for the current picture.
 *
 * Priority: an explicitly scheduled m->next_lambda wins; otherwise, unless
 * the qscale is fixed, the rate controller (ff_rate_estimate_qscale) is
 * consulted. With adaptive quantization, a per-MB qscale table is set up and
 * codec-specific qscale fixups run (call bodies elided in this excerpt).
 *
 * @param m       main encoder context
 * @param dry_run nonzero to probe only: next_lambda is not consumed, and the
 *                flag is forwarded to ff_rate_estimate_qscale
 * @return 0 on success, -1 if the rate controller produced a negative quality
 */
3618 static int estimate_qp(MPVMainEncContext *const m, int dry_run)
3619 {
3620  MPVEncContext *const s = &m->s;
3621 
3622  if (m->next_lambda){
3623  s->c.cur_pic.ptr->f->quality = m->next_lambda;
 // consume the scheduled lambda only on a real run
3624  if(!dry_run) m->next_lambda= 0;
3625  } else if (!m->fixed_qscale) {
3626  int quality = ff_rate_estimate_qscale(m, dry_run);
3627  s->c.cur_pic.ptr->f->quality = quality;
3628  if (s->c.cur_pic.ptr->f->quality < 0)
3629  return -1;
3630  }
3631 
3632  if(s->adaptive_quant){
3633  init_qscale_tab(s);
3634 
 /* Codec-specific per-MB qscale cleanup.
  * NOTE(review): the guarded call statements (original lines 3638/3644)
  * are elided in this excerpt. */
3635  switch (s->c.codec_id) {
3636  case AV_CODEC_ID_MPEG4:
3637  if (CONFIG_MPEG4_ENCODER)
3639  break;
3640  case AV_CODEC_ID_H263:
3641  case AV_CODEC_ID_H263P:
3642  case AV_CODEC_ID_FLV1:
3643  if (CONFIG_H263_ENCODER)
3645  break;
3646  }
3647 
3648  s->lambda = s->lambda_table[0];
3649  //FIXME broken
3650  }else
3651  s->lambda = s->c.cur_pic.ptr->f->quality;
3652  update_qscale(m);
3653  return 0;
3654 }
3655 
3656 /* must be called before writing the header */
 /* Update the temporal distances used by B-frame prediction:
  * - non-B frame: pp_time = distance to the previous non-B frame, and the
  *   current time becomes the new last_non_b_time;
  * - B frame: pb_time = distance from the previous non-B (P/I) frame to
  *   this B frame, derived from pp_time.
  * NOTE(review): the function signature line is elided in this excerpt. */
3658 {
3659  av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
 // picture time in time_base.num units
3660  s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;
3661 
3662  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3663  s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
3664  av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
3665  }else{
3666  s->c.pp_time = s->c.time - s->c.last_non_b_time;
3667  s->c.last_non_b_time = s->c.time;
3668  av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
3669  }
3670 }
3671 
/**
 * Encode one picture into pkt's buffer.
 *
 * Steps: update no_rounding state, pick the quantizer (estimate_qp /
 * 2-pass), split pkt's buffer among the slice contexts, run motion
 * estimation (or per-MB variance for I frames) across all slice threads,
 * optionally promote a P frame to I on scene change, select f_code/b_code
 * and clamp long MVs, set key-frame flags, write the picture header, run
 * encode_thread() on every slice context and merge the slices back.
 *
 * @param m   main encoder context (m->s is the first slice context)
 * @param pkt output packet; its buffer is partitioned among slice contexts
 *            proportionally to their macroblock rows
 * @return 0 on success, a negative error code (or -1) on failure
 */
3672 static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
3673 {
3674  MPVEncContext *const s = &m->s;
3675  int i, ret;
3676  int bits;
3677  int context_count = s->c.slice_context_count;
3678 
3679  /* we need to initialize some time vars before we can encode B-frames */
3680  // RAL: Condition added for MPEG1VIDEO
 /* NOTE(review): the guarded call statements for the next two conditions
  * (original lines 3682/3684) are elided in this excerpt. */
3681  if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
3683  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
3685 
3686 // s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3687 
 /* no_rounding alternates on non-B reference frames (flipflop) */
3688  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3689  s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
3690  } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3691  s->c.no_rounding ^= s->flipflop_rounding;
3692  }
3693 
 /* quantizer selection: 2-pass reads stats; otherwise reuse the last
  * lambda of the same picture class unless a fixed qscale was requested */
3694  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
3695  ret = estimate_qp(m, 1);
3696  if (ret < 0)
3697  return ret;
3698  ff_get_2pass_fcode(m);
3699  } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3700  if (s->c.pict_type == AV_PICTURE_TYPE_B)
3701  s->lambda = m->last_lambda_for[s->c.pict_type];
3702  else
3703  s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
3704  update_qscale(m);
3705  }
3706 
3707  s->c.mb_intra = 0; //for the rate distortion & bit compare functions
 /* give each slice context its share of the packet buffer, proportional
  * to the MB rows it covers, and sync the duplicate contexts */
3708  for (int i = 0; i < context_count; i++) {
3709  MPVEncContext *const slice = s->c.enc_contexts[i];
3710  int h = s->c.mb_height;
3711  uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
3712  uint8_t *end = pkt->data + (int64_t)pkt->size * slice->c. end_mb_y / h;
3713 
3714  init_put_bits(&slice->pb, start, end - start);
3715 
3716  if (i) {
3717  ret = ff_update_duplicate_context(&slice->c, &s->c);
3718  if (ret < 0)
3719  return ret;
3720  slice->lambda = s->lambda;
3721  slice->lambda2 = s->lambda2;
3722  }
3723  slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
3724  ff_me_init_pic(slice);
3725  }
3726 
3727  /* Estimate motion for every MB */
3728  if (s->c.pict_type != AV_PICTURE_TYPE_I) {
 /* bias lambda for ME by me_penalty_compensation (8-bit fixed point) */
3729  s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
3730  s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
3731  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
3732  if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3733  m->me_pre == 2) {
3734  s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
3735  &s->c.enc_contexts[0], NULL,
3736  context_count, sizeof(void*));
3737  }
3738  }
3739 
3740  s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
3741  NULL, context_count, sizeof(void*));
3742  }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
3743  /* I-Frame */
3744  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3745  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3746 
3747  if (!m->fixed_qscale) {
3748  /* finding spatial complexity for I-frame rate control */
3749  s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
3750  NULL, context_count, sizeof(void*));
3751  }
3752  }
 /* collect ME statistics from all slice contexts into the main one */
3753  for(i=1; i<context_count; i++){
3754  merge_context_after_me(s, s->c.enc_contexts[i]);
3755  }
3756  m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3757  m->mb_var_sum = s->me. mb_var_sum_temp;
3758  emms_c();
3759 
 /* scene change: re-encode this P frame as an intra frame */
3760  if (s->me.scene_change_score > m->scenechange_threshold &&
3761  s->c.pict_type == AV_PICTURE_TYPE_P) {
3762  s->c.pict_type = AV_PICTURE_TYPE_I;
3763  for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
3764  s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
3765  if (s->c.msmpeg4_version >= MSMP4_V3)
3766  s->c.no_rounding = 1;
3767  ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3768  m->mb_var_sum, m->mc_mb_var_sum);
3769  }
3770 
 /* choose MV range codes (f_code/b_code) from the estimated vectors and
  * clamp any vectors that would fall outside the representable range */
3771  if (!s->umvplus) {
3772  if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
3773  s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3774 
3775  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3776  int a,b;
3777  a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3778  b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3779  s->f_code = FFMAX3(s->f_code, a, b);
3780  }
3781 
3783  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3784  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3785  int j;
3786  for(i=0; i<2; i++){
3787  for(j=0; j<2; j++)
3788  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3789  s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3790  }
3791  }
3792  } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
3793  int a, b;
3794 
3795  a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3796  b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3797  s->f_code = FFMAX(a, b);
3798 
3799  a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3800  b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3801  s->b_code = FFMAX(a, b);
3802 
3803  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3804  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3805  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3806  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3807  if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
 /* NOTE(review): the declaration of 'type' used below (original lines
  * 3812-3813) is elided in this excerpt. */
3808  int dir, j;
3809  for(dir=0; dir<2; dir++){
3810  for(i=0; i<2; i++){
3811  for(j=0; j<2; j++){
3814  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3815  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3816  }
3817  }
3818  }
3819  }
3820  }
3821  }
3822 
 /* final (non-dry-run) quantizer selection */
3823  ret = estimate_qp(m, 0);
3824  if (ret < 0)
3825  return ret;
3826 
3827  if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
3828  s->c.pict_type == AV_PICTURE_TYPE_I &&
3829  !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
3830  s->c.qscale = 3; //reduce clipping problems
3831 
 /* MJPEG/AMV: bake qscale into the quantization matrices.
  * NOTE(review): the call whose arguments continue on line 3834 (original
  * line 3833) is elided in this excerpt. */
3832  if (s->c.out_format == FMT_MJPEG) {
3834  (7 + s->c.qscale) / s->c.qscale, 65535);
3835  if (ret < 0)
3836  return ret;
3837 
3838  if (s->c.codec_id != AV_CODEC_ID_AMV) {
3839  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3840  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3841 
3842  if (s->c.avctx->intra_matrix) {
3843  chroma_matrix =
3844  luma_matrix = s->c.avctx->intra_matrix;
3845  }
3846  if (s->c.avctx->chroma_intra_matrix)
3847  chroma_matrix = s->c.avctx->chroma_intra_matrix;
3848 
3849  /* for mjpeg, we do include qscale in the matrix */
3850  for (int i = 1; i < 64; i++) {
3851  int j = s->c.idsp.idct_permutation[i];
3852 
3853  s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
3854  s->c. intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->c.qscale) >> 3);
3855  }
3856  s->c.y_dc_scale_table =
3857  s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[0];
3858  s->c.chroma_intra_matrix[0] = s->c.intra_matrix[0] = 8;
3859  } else {
 /* AMV uses fixed sp5x quantization tables and DC scales */
3860  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3861  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3862  for (int i = 1; i < 64; i++) {
3863  int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];
3864 
3865  s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3866  s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3867  }
3868  s->c.y_dc_scale_table = y;
3869  s->c.c_dc_scale_table = c;
3870  s->c.intra_matrix[0] = 13;
3871  s->c.chroma_intra_matrix[0] = 14;
3872  }
3873  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3874  s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
3875  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3876  s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3877  s->c.qscale = 8;
3878  }
3879 
 /* key-frame bookkeeping */
3880  if (s->c.pict_type == AV_PICTURE_TYPE_I) {
3881  s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3882  } else {
3883  s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3884  }
3885  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
3886 
3887  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3888  m->picture_in_gop_number = 0;
3889 
 /* write the picture header and account its bits */
3890  s->c.mb_x = s->c.mb_y = 0;
3891  s->last_bits= put_bits_count(&s->pb);
3892  ret = m->encode_picture_header(m);
3893  if (ret < 0)
3894  return ret;
3895  bits= put_bits_count(&s->pb);
3896  m->header_bits = bits - s->last_bits;
3897 
 /* encode all slices in parallel, then merge their output back */
3898  for(i=1; i<context_count; i++){
3899  update_duplicate_context_after_me(s->c.enc_contexts[i], s);
3900  }
3901  s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
3902  NULL, context_count, sizeof(void*));
3903  for(i=1; i<context_count; i++){
 /* extend the main bitstream buffer over contiguous slice buffers */
3904  if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
3905  set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3906  merge_context_after_encode(s, s->c.enc_contexts[i]);
3907  }
3908  emms_c();
3909  return 0;
3910 }
3911 
3912 static inline void denoise_dct(MPVEncContext *const s, int16_t block[])
3913 {
3914  if (!s->dct_error_sum)
3915  return;
3916 
3917  const int intra = s->c.mb_intra;
3918  s->dct_count[intra]++;
3919  s->mpvencdsp.denoise_dct(block, s->dct_error_sum[intra], s->dct_offset[intra]);
3920 }
3921 
3923  int16_t *block, int n,
3924  int qscale, int *overflow){
3925  const int *qmat;
3926  const uint16_t *matrix;
3927  const uint8_t *scantable;
3928  const uint8_t *perm_scantable;
3929  int max=0;
3930  unsigned int threshold1, threshold2;
3931  int bias=0;
3932  int run_tab[65];
3933  int level_tab[65];
3934  int score_tab[65];
3935  int survivor[65];
3936  int survivor_count;
3937  int last_run=0;
3938  int last_level=0;
3939  int last_score= 0;
3940  int last_i;
3941  int coeff[2][64];
3942  int coeff_count[64];
3943  int qmul, qadd, start_i, last_non_zero, i, dc;
3944  const int esc_length= s->ac_esc_length;
3945  const uint8_t *length, *last_length;
3946  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3947  int mpeg2_qscale;
3948 
3949  s->fdsp.fdct(block);
3950 
3951  denoise_dct(s, block);
3952 
3953  qmul= qscale*16;
3954  qadd= ((qscale-1)|1)*8;
3955 
3956  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3957  else mpeg2_qscale = qscale << 1;
3958 
3959  if (s->c.mb_intra) {
3960  int q;
3961  scantable = s->c.intra_scantable.scantable;
3962  perm_scantable = s->c.intra_scantable.permutated;
3963  if (!s->c.h263_aic) {
3964  if (n < 4)
3965  q = s->c.y_dc_scale;
3966  else
3967  q = s->c.c_dc_scale;
3968  q = q << 3;
3969  } else{
3970  /* For AIC we skip quant/dequant of INTRADC */
3971  q = 1 << 3;
3972  qadd=0;
3973  }
3974 
3975  /* note: block[0] is assumed to be positive */
3976  block[0] = (block[0] + (q >> 1)) / q;
3977  start_i = 1;
3978  last_non_zero = 0;
3979  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3980  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
3981  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
3982  bias= 1<<(QMAT_SHIFT-1);
3983 
3984  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3985  length = s->intra_chroma_ac_vlc_length;
3986  last_length= s->intra_chroma_ac_vlc_last_length;
3987  } else {
3988  length = s->intra_ac_vlc_length;
3989  last_length= s->intra_ac_vlc_last_length;
3990  }
3991  } else {
3992  scantable = s->c.inter_scantable.scantable;
3993  perm_scantable = s->c.inter_scantable.permutated;
3994  start_i = 0;
3995  last_non_zero = -1;
3996  qmat = s->q_inter_matrix[qscale];
3997  matrix = s->c.inter_matrix;
3998  length = s->inter_ac_vlc_length;
3999  last_length= s->inter_ac_vlc_last_length;
4000  }
4001  last_i= start_i;
4002 
4003  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4004  threshold2= (threshold1<<1);
4005 
4006  for(i=63; i>=start_i; i--) {
4007  const int j = scantable[i];
4008  int64_t level = (int64_t)block[j] * qmat[j];
4009 
4010  if(((uint64_t)(level+threshold1))>threshold2){
4011  last_non_zero = i;
4012  break;
4013  }
4014  }
4015 
4016  for(i=start_i; i<=last_non_zero; i++) {
4017  const int j = scantable[i];
4018  int64_t level = (int64_t)block[j] * qmat[j];
4019 
4020 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4021 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4022  if(((uint64_t)(level+threshold1))>threshold2){
4023  if(level>0){
4024  level= (bias + level)>>QMAT_SHIFT;
4025  coeff[0][i]= level;
4026  coeff[1][i]= level-1;
4027 // coeff[2][k]= level-2;
4028  }else{
4029  level= (bias - level)>>QMAT_SHIFT;
4030  coeff[0][i]= -level;
4031  coeff[1][i]= -level+1;
4032 // coeff[2][k]= -level+2;
4033  }
4034  coeff_count[i]= FFMIN(level, 2);
4035  av_assert2(coeff_count[i]);
4036  max |=level;
4037  }else{
4038  coeff[0][i]= (level>>31)|1;
4039  coeff_count[i]= 1;
4040  }
4041  }
4042 
4043  *overflow= s->max_qcoeff < max; //overflow might have happened
4044 
4045  if(last_non_zero < start_i){
4046  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4047  return last_non_zero;
4048  }
4049 
4050  score_tab[start_i]= 0;
4051  survivor[0]= start_i;
4052  survivor_count= 1;
4053 
4054  for(i=start_i; i<=last_non_zero; i++){
4055  int level_index, j, zero_distortion;
4056  int dct_coeff= FFABS(block[ scantable[i] ]);
4057  int best_score=256*256*256*120;
4058 
4059  if (s->fdsp.fdct == ff_fdct_ifast)
4060  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4061  zero_distortion= dct_coeff*dct_coeff;
4062 
4063  for(level_index=0; level_index < coeff_count[i]; level_index++){
4064  int distortion;
4065  int level= coeff[level_index][i];
4066  const int alevel= FFABS(level);
4067  int unquant_coeff;
4068 
4069  av_assert2(level);
4070 
4071  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4072  unquant_coeff= alevel*qmul + qadd;
4073  } else if (s->c.out_format == FMT_MJPEG) {
4074  j = s->c.idsp.idct_permutation[scantable[i]];
4075  unquant_coeff = alevel * matrix[j] * 8;
4076  }else{ // MPEG-1
4077  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4078  if (s->c.mb_intra) {
4079  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4080  unquant_coeff = (unquant_coeff - 1) | 1;
4081  }else{
4082  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4083  unquant_coeff = (unquant_coeff - 1) | 1;
4084  }
4085  unquant_coeff<<= 3;
4086  }
4087 
4088  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4089  level+=64;
4090  if((level&(~127)) == 0){
4091  for(j=survivor_count-1; j>=0; j--){
4092  int run= i - survivor[j];
4093  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4094  score += score_tab[i-run];
4095 
4096  if(score < best_score){
4097  best_score= score;
4098  run_tab[i+1]= run;
4099  level_tab[i+1]= level-64;
4100  }
4101  }
4102 
4103  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4104  for(j=survivor_count-1; j>=0; j--){
4105  int run= i - survivor[j];
4106  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4107  score += score_tab[i-run];
4108  if(score < last_score){
4109  last_score= score;
4110  last_run= run;
4111  last_level= level-64;
4112  last_i= i+1;
4113  }
4114  }
4115  }
4116  }else{
4117  distortion += esc_length*lambda;
4118  for(j=survivor_count-1; j>=0; j--){
4119  int run= i - survivor[j];
4120  int score= distortion + score_tab[i-run];
4121 
4122  if(score < best_score){
4123  best_score= score;
4124  run_tab[i+1]= run;
4125  level_tab[i+1]= level-64;
4126  }
4127  }
4128 
4129  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4130  for(j=survivor_count-1; j>=0; j--){
4131  int run= i - survivor[j];
4132  int score= distortion + score_tab[i-run];
4133  if(score < last_score){
4134  last_score= score;
4135  last_run= run;
4136  last_level= level-64;
4137  last_i= i+1;
4138  }
4139  }
4140  }
4141  }
4142  }
4143 
4144  score_tab[i+1]= best_score;
4145 
4146  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4147  if(last_non_zero <= 27){
4148  for(; survivor_count; survivor_count--){
4149  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4150  break;
4151  }
4152  }else{
4153  for(; survivor_count; survivor_count--){
4154  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4155  break;
4156  }
4157  }
4158 
4159  survivor[ survivor_count++ ]= i+1;
4160  }
4161 
4162  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4163  last_score= 256*256*256*120;
4164  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4165  int score= score_tab[i];
4166  if (i)
4167  score += lambda * 2; // FIXME more exact?
4168 
4169  if(score < last_score){
4170  last_score= score;
4171  last_i= i;
4172  last_level= level_tab[i];
4173  last_run= run_tab[i];
4174  }
4175  }
4176  }
4177 
4178  s->coded_score[n] = last_score;
4179 
4180  dc= FFABS(block[0]);
4181  last_non_zero= last_i - 1;
4182  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4183 
4184  if(last_non_zero < start_i)
4185  return last_non_zero;
4186 
4187  if(last_non_zero == 0 && start_i == 0){
4188  int best_level= 0;
4189  int best_score= dc * dc;
4190 
4191  for(i=0; i<coeff_count[0]; i++){
4192  int level= coeff[i][0];
4193  int alevel= FFABS(level);
4194  int unquant_coeff, score, distortion;
4195 
4196  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4197  unquant_coeff= (alevel*qmul + qadd)>>3;
4198  } else{ // MPEG-1
4199  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4200  unquant_coeff = (unquant_coeff - 1) | 1;
4201  }
4202  unquant_coeff = (unquant_coeff + 4) >> 3;
4203  unquant_coeff<<= 3 + 3;
4204 
4205  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4206  level+=64;
4207  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4208  else score= distortion + esc_length*lambda;
4209 
4210  if(score < best_score){
4211  best_score= score;
4212  best_level= level - 64;
4213  }
4214  }
4215  block[0]= best_level;
4216  s->coded_score[n] = best_score - dc*dc;
4217  if(best_level == 0) return -1;
4218  else return last_non_zero;
4219  }
4220 
4221  i= last_i;
4222  av_assert2(last_level);
4223 
4224  block[ perm_scantable[last_non_zero] ]= last_level;
4225  i -= last_run + 1;
4226 
4227  for(; i>start_i; i -= run_tab[i] + 1){
4228  block[ perm_scantable[i-1] ]= level_tab[i];
4229  }
4230 
4231  return last_non_zero;
4232 }
4233 
/* 8x8 DCT basis functions, one spatial pattern per (permuted) coefficient,
 * scaled by 1 << BASIS_SHIFT; lazily filled in by build_basis(). */
static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4235 
4236 static void build_basis(uint8_t *perm){
4237  int i, j, x, y;
4238  emms_c();
4239  for(i=0; i<8; i++){
4240  for(j=0; j<8; j++){
4241  for(y=0; y<8; y++){
4242  for(x=0; x<8; x++){
4243  double s= 0.25*(1<<BASIS_SHIFT);
4244  int index= 8*i + j;
4245  int perm_index= perm[index];
4246  if(i==0) s*= sqrt(0.5);
4247  if(j==0) s*= sqrt(0.5);
4248  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4249  }
4250  }
4251  }
4252  }
4253 }
4254 
/**
 * Refine an already-quantized block under a rate-distortion criterion:
 * repeatedly try +/-1 changes on individual coefficients (including the
 * intra DC) and apply the change with the best try_8x8basis() score,
 * until no single change improves it.
 *
 * @param block  quantized coefficients, updated in place
 * @param weight per-pixel noise-shaping weights; remapped in place to 16..63
 * @param orig   original (unquantized, spatial-domain) block
 * @param n      block index (<4: luma, else chroma)
 * @return new last non-zero coefficient index in scan order
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* lazily initialize the DCT basis tables on first use */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    /* H.263-style AC dequantization: |level|*qmul +/- qadd */
    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable      = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable      = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] = (current reconstruction - original), scaled by RECON_SHIFT;
     * starts as the DC-only reconstruction (with rounding term), the AC
     * contributions are accumulated below via add_8x8basis(). */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* remap the weights into the 16..63 range and sum their energy,
     * from which the rate-distortion lambda is derived */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the run-length table of the current coefficients and add
     * each dequantized coefficient's basis contribution to rem[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* greedy refinement: each iteration evaluates every +/-1 change and
     * applies the best-scoring one; stops when nothing improves */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual; used below to reject new
             * coefficients whose sign agrees with the residual gradient */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* try +/-1 on the intra DC coefficient (distortion only,
             * no VLC rate term) */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* changing an existing coefficient: score the
                         * VLC bit-length delta of the new level */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* introducing a new +/-1 coefficient: account for
                         * the split of the surrounding zero run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* removing a +/-1 coefficient: the neighbouring runs merge */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* add the distortion change of altering the reconstruction */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change and refresh last_non_zero,
             * run_tab[] and rem[] accordingly */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4570 
4571 /**
4572  * Permute an 8x8 block according to permutation.
4573  * @param block the block which will be permuted according to
4574  * the given permutation vector
4575  * @param permutation the permutation vector
4576  * @param last the last non zero coefficient in scantable order, used to
4577  * speed the permutation up
4578  * @param scantable the used scantable, this is only used to speed the
4579  * permutation up, the block is not (inverse) permutated
4580  * to scantable order!
4581  */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t tmp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the coefficients present at scantable positions 0..last and
     * clear their original slots, so overlapping source/destination
     * positions cannot clobber each other... */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        tmp[pos]   = block[pos];
        block[pos] = 0;
    }

    /* ...then scatter each gathered coefficient to its permuted location. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = tmp[pos];
    }
}
4606 
4607 static int dct_quantize_c(MPVEncContext *const s,
4608  int16_t *block, int n,
4609  int qscale, int *overflow)
4610 {
4611  int i, last_non_zero, q, start_i;
4612  const int *qmat;
4613  const uint8_t *scantable;
4614  int bias;
4615  int max=0;
4616  unsigned int threshold1, threshold2;
4617 
4618  s->fdsp.fdct(block);
4619 
4620  denoise_dct(s, block);
4621 
4622  if (s->c.mb_intra) {
4623  scantable = s->c.intra_scantable.scantable;
4624  if (!s->c.h263_aic) {
4625  if (n < 4)
4626  q = s->c.y_dc_scale;
4627  else
4628  q = s->c.c_dc_scale;
4629  q = q << 3;
4630  } else
4631  /* For AIC we skip quant/dequant of INTRADC */
4632  q = 1 << 3;
4633 
4634  /* note: block[0] is assumed to be positive */
4635  block[0] = (block[0] + (q >> 1)) / q;
4636  start_i = 1;
4637  last_non_zero = 0;
4638  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4639  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4640  } else {
4641  scantable = s->c.inter_scantable.scantable;
4642  start_i = 0;
4643  last_non_zero = -1;
4644  qmat = s->q_inter_matrix[qscale];
4645  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4646  }
4647  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4648  threshold2= (threshold1<<1);
4649  for(i=63;i>=start_i;i--) {
4650  const int j = scantable[i];
4651  int64_t level = (int64_t)block[j] * qmat[j];
4652 
4653  if(((uint64_t)(level+threshold1))>threshold2){
4654  last_non_zero = i;
4655  break;
4656  }else{
4657  block[j]=0;
4658  }
4659  }
4660  for(i=start_i; i<=last_non_zero; i++) {
4661  const int j = scantable[i];
4662  int64_t level = (int64_t)block[j] * qmat[j];
4663 
4664 // if( bias+level >= (1<<QMAT_SHIFT)
4665 // || bias-level >= (1<<QMAT_SHIFT)){
4666  if(((uint64_t)(level+threshold1))>threshold2){
4667  if(level>0){
4668  level= (bias + level)>>QMAT_SHIFT;
4669  block[j]= level;
4670  }else{
4671  level= (bias - level)>>QMAT_SHIFT;
4672  block[j]= -level;
4673  }
4674  max |=level;
4675  }else{
4676  block[j]=0;
4677  }
4678  }
4679  *overflow= s->max_qcoeff < max; //overflow might have happened
4680 
4681  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4682  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4683  ff_block_permute(block, s->c.idsp.idct_permutation,
4684  scantable, last_non_zero);
4685 
4686  return last_non_zero;
4687 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1467
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3922
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1124
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1655
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:359
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:118
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:82
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:267
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1704
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:242
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:260
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:301
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2689
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:249
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:254
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2131
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:172
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:101
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2592
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1487
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
MPVEncContext::b_code
int b_code
backward MV resolution for B-frames
Definition: mpegvideoenc.h:81
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:524
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:823
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:241
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:99
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1863
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2614
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:273
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2614
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:141
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1289
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3657
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1885
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2609
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3618
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2605
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:255
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:298
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:233
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4234
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:956
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1488
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:261
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2806
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:816
out_size
static int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:170
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2740
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:294
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:139
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:245
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2614
AVPacket::data
uint8_t * data
Definition: packet.h:588
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:219
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2853
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:472
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:311
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2788
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2221
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2621
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:944
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:56
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:206
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1253
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:269
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:583
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:870
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:251
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:305
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:169
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2614
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:210
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1640
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:380
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1144
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2618
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:886
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1902
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:339
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2612
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:58
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:499
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:228
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:55
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2610
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:718
fail
#define fail()
Definition: checkasm.h:220
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:59
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:296
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:212
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1221
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:314
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1226
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2762
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:220
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:373
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:298
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:303
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:246
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1425
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:258
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:208
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:93
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:89
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4236
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:705
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:226
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1212
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:265
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:229
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2616
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:213
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:205
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:270
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3583
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:264
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:257
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1565
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:107
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:225
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:207
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1063
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1282
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4582
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1517
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:299
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2606
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:837
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2831
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:57
arg
const char * arg
Definition: jacosubdec.c:65
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:428
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1267
MECmpContext
Definition: me_cmp.h:50
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:304
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:126
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:236
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:81
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:171
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:908
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:300
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:489
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:330
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1769
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:505
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3582
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:876
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1088
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2608
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:945
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2908
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:118
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1131
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:142
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1317
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2617
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:253
MPVMainEncContext
Definition: mpegvideoenc.h:202
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:173
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:823
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1285
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:543
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1392
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1324
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:262
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:80
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2187
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1017
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:240
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2606
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:335
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:308
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:293
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:95
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:256
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:313
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3672
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:372
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:209
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:175
MBBackup::c
struct MBBackup::@221 c
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:266
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:289
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:306
MBBackup
Definition: mpegvideo_enc.c:2602
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:295
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *const m)
Definition: rv20enc.c:37
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:266
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:411
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(const MPVContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2613
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:264
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2604
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2942
MPVEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideoenc.h:80
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:274
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2618
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:120
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
MPVEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideoenc.h:166
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:496
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
update_mb_info
static void update_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2893
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:278
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:259
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:887
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:235
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:37
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:263
AVCodecContext::height
int height
Definition: avcodec.h:600
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:491
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1253
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:896
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1827
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2245
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:441
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:237
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1369
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
MPVEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideoenc.h:75
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:108
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:223
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:844
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:248
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2615
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:499
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2607
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:236
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3581
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1246
AVRational::den
int den
Denominator.
Definition: rational.h:60
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2614
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:830
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:247
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:83
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:947
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:888
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
ff_mpeg1_clean_buffers
static void ff_mpeg1_clean_buffers(MPVEncContext *s)
Definition: mpeg12enc.h:29
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:290
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:777
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
denoise_dct
static void denoise_dct(MPVEncContext *const s, int16_t block[])
Definition: mpegvideo_enc.c:3912
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4255
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:553
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1296
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3590
w
uint8_t w
Definition: llvidencdsp.c:39
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:227
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:278
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:168
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1033
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:946
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AVPacket
This structure stores compressed data.
Definition: packet.h:565
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:965
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:231
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:56
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:600
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2607
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:215
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:888
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4607
stride
#define stride
Definition: h264pred_template.c:536
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:150
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2618
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:312
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
rv20enc.h
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:195
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:203
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:624
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2873
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2614
pixblockdsp.h
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:944
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:119
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1599
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167