FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/internal.h"
39 #include "libavutil/intmath.h"
40 #include "libavutil/mathematics.h"
41 #include "libavutil/mem_internal.h"
42 #include "libavutil/pixdesc.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/thread.h"
45 #include "avcodec.h"
46 #include "dct.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12codecs.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include <limits.h>
79 #include "sp5x.h"
80 
81 #define QUANT_BIAS_SHIFT 8
82 
83 #define QMAT_SHIFT_MMX 16
84 #define QMAT_SHIFT 21
85 
86 static int encode_picture(MpegEncContext *s);
87 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
88 static int sse_mb(MpegEncContext *s);
89 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
90 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
91 
92 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
93 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
94 
95 static const AVOption mpv_generic_options[] = {
98  { NULL },
99 };
100 
102  .class_name = "generic mpegvideo encoder",
103  .item_name = av_default_item_name,
104  .option = mpv_generic_options,
105  .version = LIBAVUTIL_VERSION_INT,
106 };
107 
108 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
109  uint16_t (*qmat16)[2][64],
110  const uint16_t *quant_matrix,
111  int bias, int qmin, int qmax, int intra)
112 {
113  FDCTDSPContext *fdsp = &s->fdsp;
114  int qscale;
115  int shift = 0;
116 
117  for (qscale = qmin; qscale <= qmax; qscale++) {
118  int i;
119  int qscale2;
120 
121  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
122  else qscale2 = qscale << 1;
123 
124  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
125 #if CONFIG_FAANDCT
126  fdsp->fdct == ff_faandct ||
127 #endif /* CONFIG_FAANDCT */
129  for (i = 0; i < 64; i++) {
130  const int j = s->idsp.idct_permutation[i];
131  int64_t den = (int64_t) qscale2 * quant_matrix[j];
132  /* 16 <= qscale * quant_matrix[i] <= 7905
133  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
134  * 19952 <= x <= 249205026
135  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
136  * 3444240 >= (1 << 36) / (x) >= 275 */
137 
138  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
139  }
140  } else if (fdsp->fdct == ff_fdct_ifast) {
141  for (i = 0; i < 64; i++) {
142  const int j = s->idsp.idct_permutation[i];
143  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
144  /* 16 <= qscale * quant_matrix[i] <= 7905
145  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
146  * 19952 <= x <= 249205026
147  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
148  * 3444240 >= (1 << 36) / (x) >= 275 */
149 
150  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
151  }
152  } else {
153  for (i = 0; i < 64; i++) {
154  const int j = s->idsp.idct_permutation[i];
155  int64_t den = (int64_t) qscale2 * quant_matrix[j];
156  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
157  * Assume x = qscale * quant_matrix[i]
158  * So 16 <= x <= 7905
159  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
160  * so 32768 >= (1 << 19) / (x) >= 67 */
161  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
162  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
163  // (qscale * quant_matrix[i]);
164  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
165 
166  if (qmat16[qscale][0][i] == 0 ||
167  qmat16[qscale][0][i] == 128 * 256)
168  qmat16[qscale][0][i] = 128 * 256 - 1;
169  qmat16[qscale][1][i] =
170  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
171  qmat16[qscale][0][i]);
172  }
173  }
174 
175  for (i = intra; i < 64; i++) {
176  int64_t max = 8191;
177  if (fdsp->fdct == ff_fdct_ifast) {
178  max = (8191LL * ff_aanscales[i]) >> 14;
179  }
180  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
181  shift++;
182  }
183  }
184  }
185  if (shift) {
186  av_log(s->avctx, AV_LOG_INFO,
187  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
188  QMAT_SHIFT - shift);
189  }
190 }
191 
192 static inline void update_qscale(MpegEncContext *s)
193 {
194  if (s->q_scale_type == 1 && 0) {
195  int i;
196  int bestdiff=INT_MAX;
197  int best = 1;
198 
199  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
200  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
201  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
202  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
203  continue;
204  if (diff < bestdiff) {
205  bestdiff = diff;
206  best = i;
207  }
208  }
209  s->qscale = best;
210  } else {
211  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
212  (FF_LAMBDA_SHIFT + 7);
213  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
214  }
215 
216  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
218 }
219 
221 {
222  int i;
223 
224  if (matrix) {
225  put_bits(pb, 1, 1);
226  for (i = 0; i < 64; i++) {
228  }
229  } else
230  put_bits(pb, 1, 0);
231 }
232 
233 /**
234  * init s->current_picture.qscale_table from s->lambda_table
235  */
237 {
238  int8_t * const qscale_table = s->current_picture.qscale_table;
239  int i;
240 
241  for (i = 0; i < s->mb_num; i++) {
242  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
243  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
244  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
245  s->avctx->qmax);
246  }
247 }
248 
250  const MpegEncContext *src)
251 {
252 #define COPY(a) dst->a= src->a
253  COPY(pict_type);
255  COPY(f_code);
256  COPY(b_code);
257  COPY(qscale);
258  COPY(lambda);
259  COPY(lambda2);
260  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
261  COPY(progressive_frame); // FIXME don't set in encode_header
262  COPY(partitioned_frame); // FIXME don't set in encode_header
263 #undef COPY
264 }
265 
266 static void mpv_encode_init_static(void)
267 {
268  for (int i = -16; i < 16; i++)
269  default_fcode_tab[i + MAX_MV] = 1;
270 }
271 
272 /**
273  * Set the given MpegEncContext to defaults for encoding.
274  * the changed fields will not depend upon the prior state of the MpegEncContext.
275  */
277 {
278  static AVOnce init_static_once = AV_ONCE_INIT;
279 
281 
282  ff_thread_once(&init_static_once, mpv_encode_init_static);
283 
284  s->me.mv_penalty = default_mv_penalty;
285  s->fcode_tab = default_fcode_tab;
286 
287  s->input_picture_number = 0;
288  s->picture_in_gop_number = 0;
289 }
290 
292 {
293 #if ARCH_X86
295 #endif
296 
297  if (CONFIG_H263_ENCODER)
298  ff_h263dsp_init(&s->h263dsp);
299  if (!s->dct_quantize)
300  s->dct_quantize = ff_dct_quantize_c;
301  if (!s->denoise_dct)
302  s->denoise_dct = denoise_dct_c;
303  s->fast_dct_quantize = s->dct_quantize;
304  if (s->avctx->trellis)
305  s->dct_quantize = dct_quantize_trellis_c;
306 
307  return 0;
308 }
309 
310 /* init video encoder */
312 {
314  AVCPBProperties *cpb_props;
315  int i, ret;
316  int mb_array_size, mv_table_size;
317 
319 
320  switch (avctx->pix_fmt) {
321  case AV_PIX_FMT_YUVJ444P:
322  case AV_PIX_FMT_YUV444P:
323  s->chroma_format = CHROMA_444;
324  break;
325  case AV_PIX_FMT_YUVJ422P:
326  case AV_PIX_FMT_YUV422P:
327  s->chroma_format = CHROMA_422;
328  break;
329  case AV_PIX_FMT_YUVJ420P:
330  case AV_PIX_FMT_YUV420P:
331  default:
332  s->chroma_format = CHROMA_420;
333  break;
334  }
335 
337 
338  s->bit_rate = avctx->bit_rate;
339  s->width = avctx->width;
340  s->height = avctx->height;
341  if (avctx->gop_size > 600 &&
344  "keyframe interval too large!, reducing it from %d to %d\n",
345  avctx->gop_size, 600);
346  avctx->gop_size = 600;
347  }
348  s->gop_size = avctx->gop_size;
349  s->avctx = avctx;
351  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
352  "is %d.\n", MAX_B_FRAMES);
354  } else if (avctx->max_b_frames < 0) {
356  "max b frames must be 0 or positive for mpegvideo based encoders\n");
357  return AVERROR(EINVAL);
358  }
359  s->max_b_frames = avctx->max_b_frames;
360  s->codec_id = avctx->codec->id;
361  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
362  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
363  return AVERROR(EINVAL);
364  }
365 
366  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
367  s->rtp_mode = !!s->rtp_payload_size;
368  s->intra_dc_precision = avctx->intra_dc_precision;
369 
370  // workaround some differences between how applications specify dc precision
371  if (s->intra_dc_precision < 0) {
372  s->intra_dc_precision += 8;
373  } else if (s->intra_dc_precision >= 8)
374  s->intra_dc_precision -= 8;
375 
376  if (s->intra_dc_precision < 0) {
378  "intra dc precision must be positive, note some applications use"
379  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
380  return AVERROR(EINVAL);
381  }
382 
383  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
384  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
385  return AVERROR(EINVAL);
386  }
387  s->user_specified_pts = AV_NOPTS_VALUE;
388 
389  if (s->gop_size <= 1) {
390  s->intra_only = 1;
391  s->gop_size = 12;
392  } else {
393  s->intra_only = 0;
394  }
395 
396  /* Fixed QSCALE */
397  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
398 
399  s->adaptive_quant = (avctx->lumi_masking ||
400  avctx->dark_masking ||
403  avctx->p_masking ||
404  s->border_masking ||
405  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
406  !s->fixed_qscale;
407 
408  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
409 
411  switch(avctx->codec_id) {
414  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
415  break;
416  case AV_CODEC_ID_MPEG4:
420  if (avctx->rc_max_rate >= 15000000) {
421  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
422  } else if(avctx->rc_max_rate >= 2000000) {
423  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
424  } else if(avctx->rc_max_rate >= 384000) {
425  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
426  } else
427  avctx->rc_buffer_size = 40;
428  avctx->rc_buffer_size *= 16384;
429  break;
430  }
431  if (avctx->rc_buffer_size) {
432  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
433  }
434  }
435 
436  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
437  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
438  return AVERROR(EINVAL);
439  }
440 
443  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
444  }
445 
447  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
448  return AVERROR(EINVAL);
449  }
450 
452  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
453  return AVERROR(EINVAL);
454  }
455 
456  if (avctx->rc_max_rate &&
460  "impossible bitrate constraints, this will fail\n");
461  }
462 
463  if (avctx->rc_buffer_size &&
464  avctx->bit_rate * (int64_t)avctx->time_base.num >
465  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
466  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
467  return AVERROR(EINVAL);
468  }
469 
470  if (!s->fixed_qscale &&
473  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
475  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
476  if (nbt <= INT_MAX) {
477  avctx->bit_rate_tolerance = nbt;
478  } else
479  avctx->bit_rate_tolerance = INT_MAX;
480  }
481 
482  if (avctx->rc_max_rate &&
484  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
485  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
486  90000LL * (avctx->rc_buffer_size - 1) >
487  avctx->rc_max_rate * 0xFFFFLL) {
489  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
490  "specified vbv buffer is too large for the given bitrate!\n");
491  }
492 
493  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
494  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
495  s->codec_id != AV_CODEC_ID_FLV1) {
496  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
497  return AVERROR(EINVAL);
498  }
499 
500  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
502  "OBMC is only supported with simple mb decision\n");
503  return AVERROR(EINVAL);
504  }
505 
506  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
507  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
508  return AVERROR(EINVAL);
509  }
510 
511  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
512  s->codec_id == AV_CODEC_ID_H263 ||
513  s->codec_id == AV_CODEC_ID_H263P) &&
514  (avctx->sample_aspect_ratio.num > 255 ||
515  avctx->sample_aspect_ratio.den > 255)) {
517  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
521  }
522 
523  if ((s->codec_id == AV_CODEC_ID_H263 ||
524  s->codec_id == AV_CODEC_ID_H263P) &&
525  (avctx->width > 2048 ||
526  avctx->height > 1152 )) {
527  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
528  return AVERROR(EINVAL);
529  }
530  if ((s->codec_id == AV_CODEC_ID_H263 ||
531  s->codec_id == AV_CODEC_ID_H263P ||
532  s->codec_id == AV_CODEC_ID_RV20) &&
533  ((avctx->width &3) ||
534  (avctx->height&3) )) {
535  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
536  return AVERROR(EINVAL);
537  }
538 
539  if (s->codec_id == AV_CODEC_ID_RV10 &&
540  (avctx->width &15 ||
541  avctx->height&15 )) {
542  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
547  s->codec_id == AV_CODEC_ID_WMV2) &&
548  avctx->width & 1) {
549  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
550  return AVERROR(EINVAL);
551  }
552 
554  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
555  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
556  return AVERROR(EINVAL);
557  }
558 
559  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
560  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
561  return AVERROR(EINVAL);
562  }
563 
564  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
566  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
567  return AVERROR(EINVAL);
568  }
569 
570  if (s->scenechange_threshold < 1000000000 &&
573  "closed gop with scene change detection are not supported yet, "
574  "set threshold to 1000000000\n");
575  return AVERROR_PATCHWELCOME;
576  }
577 
579  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
582  "low delay forcing is only available for mpeg2, "
583  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
584  return AVERROR(EINVAL);
585  }
586  if (s->max_b_frames != 0) {
588  "B-frames cannot be used with low delay\n");
589  return AVERROR(EINVAL);
590  }
591  }
592 
593  if (s->q_scale_type == 1) {
594  if (avctx->qmax > 28) {
596  "non linear quant only supports qmax <= 28 currently\n");
597  return AVERROR_PATCHWELCOME;
598  }
599  }
600 
601  if (avctx->slices > 1 &&
603  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
604  return AVERROR(EINVAL);
605  }
606 
607  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
609  "notice: b_frame_strategy only affects the first pass\n");
610  s->b_frame_strategy = 0;
611  }
612 
614  if (i > 1) {
615  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
616  avctx->time_base.den /= i;
617  avctx->time_base.num /= i;
618  //return -1;
619  }
620 
621  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
622  // (a + x * 3 / 8) / x
623  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
624  s->inter_quant_bias = 0;
625  } else {
626  s->intra_quant_bias = 0;
627  // (a - x / 4) / x
628  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
629  }
630 
631  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
632  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
633  return AVERROR(EINVAL);
634  }
635 
636  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
637 
638  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
639  avctx->time_base.den > (1 << 16) - 1) {
641  "timebase %d/%d not supported by MPEG 4 standard, "
642  "the maximum admitted value for the timebase denominator "
643  "is %d\n", avctx->time_base.num, avctx->time_base.den,
644  (1 << 16) - 1);
645  return AVERROR(EINVAL);
646  }
647  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
648 
649  switch (avctx->codec->id) {
650 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
652  s->rtp_mode = 1;
653  /* fallthrough */
655  s->out_format = FMT_MPEG1;
656  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
657  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
659  break;
660 #endif
661 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
662  case AV_CODEC_ID_MJPEG:
663  case AV_CODEC_ID_AMV:
664  s->out_format = FMT_MJPEG;
665  s->intra_only = 1; /* force intra only for jpeg */
666  if ((ret = ff_mjpeg_encode_init(s)) < 0)
667  return ret;
668  avctx->delay = 0;
669  s->low_delay = 1;
670  break;
671 #endif
672  case AV_CODEC_ID_SPEEDHQ:
673  s->out_format = FMT_SPEEDHQ;
674  s->intra_only = 1; /* force intra only for SHQ */
675  if (!CONFIG_SPEEDHQ_ENCODER)
677  if ((ret = ff_speedhq_encode_init(s)) < 0)
678  return ret;
679  avctx->delay = 0;
680  s->low_delay = 1;
681  break;
682  case AV_CODEC_ID_H261:
683  if (!CONFIG_H261_ENCODER)
686  if (ret < 0)
687  return ret;
688  s->out_format = FMT_H261;
689  avctx->delay = 0;
690  s->low_delay = 1;
691  s->rtp_mode = 0; /* Sliced encoding not supported */
692  break;
693  case AV_CODEC_ID_H263:
694  if (!CONFIG_H263_ENCODER)
697  s->width, s->height) == 8) {
699  "The specified picture size of %dx%d is not valid for "
700  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
701  "352x288, 704x576, and 1408x1152. "
702  "Try H.263+.\n", s->width, s->height);
703  return AVERROR(EINVAL);
704  }
705  s->out_format = FMT_H263;
706  avctx->delay = 0;
707  s->low_delay = 1;
708  break;
709  case AV_CODEC_ID_H263P:
710  s->out_format = FMT_H263;
711  s->h263_plus = 1;
712  /* Fx */
713  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
714  s->modified_quant = s->h263_aic;
715  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
716  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
717 
718  /* /Fx */
719  /* These are just to be sure */
720  avctx->delay = 0;
721  s->low_delay = 1;
722  break;
723  case AV_CODEC_ID_FLV1:
724  s->out_format = FMT_H263;
725  s->h263_flv = 2; /* format = 1; 11-bit codes */
726  s->unrestricted_mv = 1;
727  s->rtp_mode = 0; /* don't allow GOB */
728  avctx->delay = 0;
729  s->low_delay = 1;
730  break;
731  case AV_CODEC_ID_RV10:
732  s->out_format = FMT_H263;
733  avctx->delay = 0;
734  s->low_delay = 1;
735  break;
736  case AV_CODEC_ID_RV20:
737  s->out_format = FMT_H263;
738  avctx->delay = 0;
739  s->low_delay = 1;
740  s->modified_quant = 1;
741  s->h263_aic = 1;
742  s->h263_plus = 1;
743  s->loop_filter = 1;
744  s->unrestricted_mv = 0;
745  break;
746  case AV_CODEC_ID_MPEG4:
747  s->out_format = FMT_H263;
748  s->h263_pred = 1;
749  s->unrestricted_mv = 1;
750  s->low_delay = s->max_b_frames ? 0 : 1;
751  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
752  break;
754  s->out_format = FMT_H263;
755  s->h263_pred = 1;
756  s->unrestricted_mv = 1;
757  s->msmpeg4_version = 2;
758  avctx->delay = 0;
759  s->low_delay = 1;
760  break;
762  s->out_format = FMT_H263;
763  s->h263_pred = 1;
764  s->unrestricted_mv = 1;
765  s->msmpeg4_version = 3;
766  s->flipflop_rounding = 1;
767  avctx->delay = 0;
768  s->low_delay = 1;
769  break;
770  case AV_CODEC_ID_WMV1:
771  s->out_format = FMT_H263;
772  s->h263_pred = 1;
773  s->unrestricted_mv = 1;
774  s->msmpeg4_version = 4;
775  s->flipflop_rounding = 1;
776  avctx->delay = 0;
777  s->low_delay = 1;
778  break;
779  case AV_CODEC_ID_WMV2:
780  s->out_format = FMT_H263;
781  s->h263_pred = 1;
782  s->unrestricted_mv = 1;
783  s->msmpeg4_version = 5;
784  s->flipflop_rounding = 1;
785  avctx->delay = 0;
786  s->low_delay = 1;
787  break;
788  default:
789  return AVERROR(EINVAL);
790  }
791 
792  avctx->has_b_frames = !s->low_delay;
793 
794  s->encoding = 1;
795 
796  s->progressive_frame =
797  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
799  s->alternate_scan);
800 
801  /* init */
803  if ((ret = ff_mpv_common_init(s)) < 0)
804  return ret;
805 
806  ff_fdctdsp_init(&s->fdsp, avctx);
807  ff_me_cmp_init(&s->mecc, avctx);
808  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
809  ff_pixblockdsp_init(&s->pdsp, avctx);
810 
811  if (!(avctx->stats_out = av_mallocz(256)) ||
812  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
813  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
814  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
815  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
816  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
817  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
818  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
819  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
820  return AVERROR(ENOMEM);
821 
822  /* Allocate MV tables; the MV and MB tables will be copied
823  * to slice contexts by ff_update_duplicate_context(). */
824  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
825  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
826  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
827  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
828  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
829  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
830  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
831  return AVERROR(ENOMEM);
832  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
833  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
834  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
835  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
836  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
837  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
838 
839  /* Allocate MB type table */
840  mb_array_size = s->mb_stride * s->mb_height;
841  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
842  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
843  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
844  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
845  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
846  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
847  !(s->mb_mean = av_mallocz(mb_array_size)))
848  return AVERROR(ENOMEM);
849 
850 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
851  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
852  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
853  int16_t (*tmp1)[2];
854  uint8_t *tmp2;
855  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
856  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
857  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
858  return AVERROR(ENOMEM);
859 
860  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
861  tmp1 += s->mb_stride + 1;
862 
863  for (int i = 0; i < 2; i++) {
864  for (int j = 0; j < 2; j++) {
865  for (int k = 0; k < 2; k++) {
866  s->b_field_mv_table[i][j][k] = tmp1;
867  tmp1 += mv_table_size;
868  }
869  s->b_field_select_table[i][j] = tmp2;
870  tmp2 += 2 * mv_table_size;
871  }
872  }
873  }
874 
875  if (s->noise_reduction) {
876  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
877  return AVERROR(ENOMEM);
878  }
879 
881 
882  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
883  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
884  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
885  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
886  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
887  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
888  } else {
889  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
890  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
891  }
892 
893  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
894  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
895 
896  if (s->slice_context_count > 1) {
897  s->rtp_mode = 1;
898 
900  s->h263_slice_structured = 1;
901  }
902 
903  s->quant_precision = 5;
904 
905  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
906  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
907 
908  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
910  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
912  }
913 
914  /* init q matrix */
915  for (i = 0; i < 64; i++) {
916  int j = s->idsp.idct_permutation[i];
917  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
918  s->mpeg_quant) {
919  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
920  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
921  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
922  s->intra_matrix[j] =
923  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
924  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
925  s->intra_matrix[j] =
926  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
927  } else {
928  /* MPEG-1/2 */
929  s->chroma_intra_matrix[j] =
930  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
931  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
932  }
933  if (avctx->intra_matrix)
934  s->intra_matrix[j] = avctx->intra_matrix[i];
935  if (avctx->inter_matrix)
936  s->inter_matrix[j] = avctx->inter_matrix[i];
937  }
938 
939  /* precompute matrix */
940  /* for mjpeg, we do include qscale in the matrix */
941  if (s->out_format != FMT_MJPEG) {
942  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
943  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
944  31, 1);
945  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
946  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
947  31, 0);
948  }
949 
950  if ((ret = ff_rate_control_init(s)) < 0)
951  return ret;
952 
953  if (s->b_frame_strategy == 2) {
954  for (i = 0; i < s->max_b_frames + 2; i++) {
955  s->tmp_frames[i] = av_frame_alloc();
956  if (!s->tmp_frames[i])
957  return AVERROR(ENOMEM);
958 
959  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
960  s->tmp_frames[i]->width = s->width >> s->brd_scale;
961  s->tmp_frames[i]->height = s->height >> s->brd_scale;
962 
963  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
964  if (ret < 0)
965  return ret;
966  }
967  }
968 
969  cpb_props = ff_add_cpb_side_data(avctx);
970  if (!cpb_props)
971  return AVERROR(ENOMEM);
972  cpb_props->max_bitrate = avctx->rc_max_rate;
973  cpb_props->min_bitrate = avctx->rc_min_rate;
974  cpb_props->avg_bitrate = avctx->bit_rate;
975  cpb_props->buffer_size = avctx->rc_buffer_size;
976 
977  return 0;
978 }
979 
981 {
983  int i;
984 
986 
988 
989  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
990  av_frame_free(&s->tmp_frames[i]);
991 
992  av_frame_free(&s->new_picture);
993 
995 
996  av_freep(&s->p_mv_table_base);
997  av_freep(&s->b_forw_mv_table_base);
998  av_freep(&s->b_back_mv_table_base);
999  av_freep(&s->b_bidir_forw_mv_table_base);
1000  av_freep(&s->b_bidir_back_mv_table_base);
1001  av_freep(&s->b_direct_mv_table_base);
1002  av_freep(&s->b_field_mv_table_base);
1003  av_freep(&s->b_field_select_table[0][0]);
1004  av_freep(&s->p_field_select_table[0]);
1005 
1006  av_freep(&s->mb_type);
1007  av_freep(&s->lambda_table);
1008 
1009  av_freep(&s->cplx_tab);
1010  av_freep(&s->bits_tab);
1011 
1012  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1013  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1014  s->q_chroma_intra_matrix= NULL;
1015  s->q_chroma_intra_matrix16= NULL;
1016  av_freep(&s->q_intra_matrix);
1017  av_freep(&s->q_inter_matrix);
1018  av_freep(&s->q_intra_matrix16);
1019  av_freep(&s->q_inter_matrix16);
1020  av_freep(&s->input_picture);
1021  av_freep(&s->reordered_input_picture);
1022  av_freep(&s->dct_offset);
1023  av_freep(&s->mb_var);
1024  av_freep(&s->mc_mb_var);
1025  av_freep(&s->mb_mean);
1026 
1027  return 0;
1028 }
1029 
1030 #define IS_ENCODER 1
1032 
/* Reconstruct the current macroblock on the encoder side; when
 * FF_DEBUG_DCT_COEFF is enabled, dump all coefficients first.
 * NOTE(review): the call performing the actual reconstruction (from the
 * included mpv_reconstruct_mb template, original line 1047) appears to
 * have been lost in extraction — verify against upstream. */
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        /* 6 blocks: 4 luma + 2 chroma; print in un-permuted (natural) order */
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

}
1049 
/**
 * Sum of absolute differences between a 16x16 pixel block and a constant
 * reference value.
 *
 * @param src    top-left pixel of the block
 * @param ref    constant value each pixel is compared against
 * @param stride distance in bytes between successive rows of src
 * @return the accumulated absolute error over all 256 pixels
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1063 
1064 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1065  const uint8_t *ref, int stride)
1066 {
1067  int x, y, w, h;
1068  int acc = 0;
1069 
1070  w = s->width & ~15;
1071  h = s->height & ~15;
1072 
1073  for (y = 0; y < h; y += 16) {
1074  for (x = 0; x < w; x += 16) {
1075  int offset = x + y * stride;
1076  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1077  stride, 16);
1078  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1079  int sae = get_sae(src + offset, mean, stride);
1080 
1081  acc += sae + 500 < sad;
1082  }
1083  }
1084  return acc;
1085 }
1086 
/* Thin wrapper: allocate (or, when 'shared', merely wrap) the buffers of
 * 'pic' with the geometry of this encoder context.  Propagates
 * ff_alloc_picture()'s return value. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
1094 
/**
 * Queue one input frame for encoding.
 *
 * Copies (or, when layout/alignment allow, directly references) pic_arg
 * into an internal Picture, derives and validates its pts, and appends it
 * to s->input_picture[] at the slot given by the encoder delay.  A NULL
 * pic_arg signals flushing: the queue is only compacted.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                // timestamps must be strictly monotonic
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            // no pts given: extrapolate from the previous one, or fall
            // back to the display order index
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        // direct mode (referencing the caller's buffer without a copy) is
        // only possible when strides, alignment and dimensions match what
        // the encoder expects
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                // plane-by-plane copy into the padded internal buffer
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    const uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    // interlaced MPEG-2 needs deeper bottom padding when
                    // the height is far from a multiple of 32
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    // pad the bottom/right edge when dimensions are not
                    // multiples of the (padded) block size
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = pic;

    return 0;
}
1245 
/**
 * Decide whether picture p is similar enough to the reference to be
 * skipped.  Compares both frames 8x8-block-wise with frame_skip_cmp and
 * aggregates the per-block scores according to |frame_skip_exp|.
 *
 * @return 1 when the frame may be skipped, 0 otherwise.
 */
static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
{
    int x, y, plane;
    int score = 0;
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        const int bw = plane ? 1 : 2;   // 8x8 blocks per MB dimension: 2 for luma, 1 for chroma
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                // non-shared pictures are stored with a 16-byte offset
                int off = p->shared ? 0 : 16;
                const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                // aggregation mode selected by the magnitude of frame_skip_exp
                switch (FFABS(s->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    if (score)
        score64 = score;
    // negative exponent: normalize per-MB and take the |exp|-th root
    if (s->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
                      -1.0/s->frame_skip_exp);

    if (score64 < s->frame_skip_threshold)
        return 1;
    if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1286 
/* Encode one frame with a helper context and return the total size in
 * bytes of all packets it produced, draining until the encoder has no more
 * output.  NOTE(review): the doc extraction dropped the function signature
 * and the avcodec send/receive calls that set 'ret' — as shown, 'ret'
 * would be read uninitialized; confirm against the original source. */
{
    int ret;
    int size = 0;

    /* NOTE(review): line dropped by doc extraction here (presumably the
     * call sending the frame, which sets 'ret'). */
    if (ret < 0)
        return ret;

    do {
        /* NOTE(review): line dropped by doc extraction here (presumably
         * the call receiving a packet, which sets 'ret'). */
        if (ret >= 0) {
            size += pkt->size;
            /* NOTE(review): line dropped by doc extraction here
             * (presumably unreferencing the packet). */
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    return size;
}
1307 
/* Brute-force search for the best number of B-frames: downscale the queued
 * input pictures, encode them with each candidate B-frame count using a
 * scratch encoder context, and pick the count with the lowest
 * rate-distortion cost.  Returns the best count, or a negative error code.
 * NOTE(review): the doc extraction dropped the function signature and
 * several statement lines (marked below) — confirm against the original. */
{
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width  = s->width  >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd  = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
    /* NOTE(review): line dropped by doc extraction here (the shift amount,
     * presumably FF_LAMBDA_SHIFT). */

    /* downscale the reference and the queued input pictures */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
                                           s->next_picture_ptr;

        if (pre_input_ptr) {
            const uint8_t *data[4];
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input_ptr->shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input_ptr->f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input_ptr->f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input_ptr->f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* try each candidate B-frame count j and measure its RD cost */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* NOTE(review): line dropped by doc extraction here (allocation of
         * the scratch encoder context 'c'). */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width        = width;
        c->height       = height;
        /* NOTE(review): line dropped by doc extraction here (initial flags
         * assignment). */
        c->flags       |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision  = s->avctx->mb_decision;
        c->me_cmp       = s->avctx->me_cmp;
        c->mb_cmp       = s->avctx->mb_cmp;
        c->me_sub_cmp   = s->avctx->me_sub_cmp;
        c->pix_fmt      = AV_PIX_FMT_YUV420P;
        c->time_base    = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, s->avctx->codec, NULL);
        if (ret < 0)
            goto fail;


        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            // every (j+1)-th frame (and the last one) is coded as P
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            s->tmp_frames[i + 1]->pict_type = is_p ?
            /* NOTE(review): line dropped by doc extraction here (the P/B
             * operands of this conditional). */
            s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* NOTE(review): line dropped by doc extraction here (the draining
         * call that sets out_size). */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd      = rd;
            best_b_count = j;
        }

fail:
        /* NOTE(review): line(s) dropped by doc extraction here (freeing
         * the scratch encoder context). */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1452 
/* Choose the next picture to encode and reorder the input queue: applies
 * frame-skip detection, decides the number of B-frames (strategy 0/1/2),
 * enforces GOP / closed-GOP constraints, and finally publishes the chosen
 * picture in s->new_picture / s->current_picture_ptr.
 * NOTE(review): the doc extraction dropped the function signature and a
 * couple of statement lines (marked below) — confirm against the original. */
{
    int i, ret;

    /* shift the reordered queue down by one */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (s->frame_skip_threshold || s->frame_skip_factor) {
            if (s->picture_in_gop_number < s->gop_size &&
                s->next_picture_ptr &&
                skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                // FIXME check that the gop check above is +-1 correct
                av_frame_unref(s->input_picture[0]->f);

                ff_vbv_update(s, 0);

                goto no_output_pic;
            }
        }

        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            // no reference available (or intra-only codec): force an I-frame
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames = 0;

            // second pass: take the picture types recorded by the first pass
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->b_frame_strategy == 0) {
                // fixed: use max_b_frames, as far as input is available
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->b_frame_strategy == 1) {
                // heuristic: score each candidate by intra-looking blocks
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                        /* NOTE(review): line dropped by doc extraction here
                         * (presumably the scoring call, e.g. get_intra_count,
                         * whose trailing arguments follow). */
                            s->input_picture[i    ]->f->data[0],
                            s->input_picture[i - 1]->f->data[0],
                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->b_frame_strategy == 2) {
                // full search over candidate counts
                b_frames = estimate_best_b_count(s);
                if (b_frames < 0)
                    return b_frames;
            }

            emms_c();

            // honour picture types forced from outside (e.g. second pass)
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many B-frames in a row\n");
            }

            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                /* NOTE(review): line dropped by doc extraction here
                 * (presumably AV_PICTURE_TYPE_B;). */
                s->reordered_input_picture[i + 1]->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    av_frame_unref(s->new_picture);

    if (s->reordered_input_picture[0]) {
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = av_frame_ref(s->new_picture,
                                s->reordered_input_picture[0]->f)))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;
            pic->coded_picture_number   = s->reordered_input_picture[0]->coded_picture_number;
            pic->display_picture_number = s->reordered_input_picture[0]->display_picture_number;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                if (s->new_picture->data[i])
                    s->new_picture->data[i] += INPLACE_OFFSET;
            }
        }
        s->picture_number = s->current_picture_ptr->display_picture_number;

    }
    return 0;
}
1628 
/* Post-encode bookkeeping for the just-coded picture: pad the reference
 * frame edges for unrestricted motion vectors and remember picture type /
 * lambda for the next frame.
 * NOTE(review): the doc extraction dropped the function signature and one
 * argument line of the first draw_edges() call — confirm against the
 * original source. */
{
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        !s->intra_only) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
                                s->current_picture.f->linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): line dropped by doc
                                 * extraction here (the luma edge widths,
                                 * presumably EDGE_WIDTH, EDGE_WIDTH,). */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
                                s->current_picture.f->linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
                                s->current_picture.f->linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1665 
/* Refresh the noise-reduction DCT offset tables from the accumulated error
 * statistics, halving the accumulators once they grow large so the running
 * averages keep adapting.
 * NOTE(review): the doc extraction dropped the function signature —
 * confirm against the original source. */
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        // keep the running sums bounded; halving both preserves the ratio
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        // per-coefficient offset; "+1" avoids division by zero
        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1686 
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, take new references, and adapt data pointers / linesizes for
 * field pictures.  Returns 0 or a negative error code.
 * NOTE(review): the doc extraction dropped the function signature and the
 * call inside the final noise-reduction branch — confirm against the
 * original source. */
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    // a non-B picture becomes the new forward reference
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    // field pictures: double the stride and point at the selected field
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): line dropped by doc extraction here (presumably
         * the update_noise_reduction(s) call). */
    }

    return 0;
}
1746 
/* Top-level per-frame encode entry point: queues the input frame, selects
 * the picture to code, allocates the output packet, runs the actual
 * encoding (with VBV-driven re-encode retries), performs bit stuffing,
 * updates MPEG-1/2 vbv_delay for CBR, and fills in packet timestamps.
 * NOTE(review): the doc extraction dropped the first signature line and a
 * number of statement lines (each marked "[dropped by doc extraction]"
 * below) — confirm all of them against the original source. */
                        /* [dropped by doc extraction: first signature line] */
                        const AVFrame *pic_arg, int *got_packet)
{
    /* [dropped by doc extraction: presumably the context lookup] */
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        size_t pkt_size = 10000 + s->mb_width * s->mb_height *
                                  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_picture, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        /* [dropped by doc extraction] */
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 /* [dropped by doc extraction: the side-data type] */
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        // split the packet buffer proportionally between slice threads
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int   end_y = s->thread_context[i]->  end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        ret = encode_picture(s);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* [dropped by doc extraction] */
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        // VBV check: if the frame is too large, raise lambda and re-encode
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                // reset all slice bit writers before the retry
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

            /* [dropped by doc extraction] */
        }

        /* [dropped by doc extraction: 2 lines] */

        for (i = 0; i < 4; i++) {
            avctx->error[i] += s->encoding_error[i];
        }
        ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
                                       s->encoding_error,
                                       /* [dropped by doc extraction: one argument line] */
                                       s->pict_type);

        /* [dropped by doc extraction] */
        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        // pad the frame with stuffing bits as required by the VBV model
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* [dropped by doc extraction: the case labels for this branch,
             * presumably the MPEG-1/2 codec IDs] */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            /* [dropped by doc extraction: one condition line] */
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits  = avctx->rc_max_rate *
                             /* [dropped by doc extraction: the factor line] */
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_pos - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;

            if (bits < 0)
                /* [dropped by doc extraction: the av_log call line] */
                       "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            // patch the 16-bit vbv_delay field in the already-written header
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1]  = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* [dropped by doc extraction: the side-data attach call] */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits     += s->frame_bits;

        pkt->pts = s->current_picture.f->pts;
        pkt->duration = s->current_picture.f->duration;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
        /* [dropped by doc extraction: the condition line opening this block] */
            ret = ff_encode_reordered_opaque(avctx, pkt, s->current_picture.f);
            if (ret < 0)
                return ret;
        }

        if (s->current_picture.f->key_frame)
            /* [dropped by doc extraction: presumably setting the keyframe packet flag] */
        if (s->mb_info)
            /* [dropped by doc extraction: presumably shrinking the mb_info side data] */
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(avctx, &s->picture[i]);
    }

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1995 
/* Zero out a block that contains only a few small (|level| == 1)
 * coefficients when its run-weighted "score" stays below the threshold:
 * such blocks cost more to code than the quality they add.  A negative
 * threshold additionally allows the DC coefficient to be eliminated.
 * NOTE(review): the doc extraction dropped the first line of the signature
 * (presumably "static void dct_single_coeff_elimination(MpegEncContext *s,")
 * — confirm against the original source. */
                                            int n, int threshold)
{
    // weight of a |level|==1 coefficient as a function of the zero-run
    // preceding it: early, short-run coefficients count more
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;            // DC may be eliminated too
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            // any larger coefficient disqualifies the block entirely
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    // cheap enough: clear everything except (possibly) the DC
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2051 
2052 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2053  int last_index)
2054 {
2055  int i;
2056  const int maxlevel = s->max_qcoeff;
2057  const int minlevel = s->min_qcoeff;
2058  int overflow = 0;
2059 
2060  if (s->mb_intra) {
2061  i = 1; // skip clipping of intra dc
2062  } else
2063  i = 0;
2064 
2065  for (; i <= last_index; i++) {
2066  const int j = s->intra_scantable.permutated[i];
2067  int level = block[j];
2068 
2069  if (level > maxlevel) {
2070  level = maxlevel;
2071  overflow++;
2072  } else if (level < minlevel) {
2073  level = minlevel;
2074  overflow++;
2075  }
2076 
2077  block[j] = level;
2078  }
2079 
2080  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2081  av_log(s->avctx, AV_LOG_INFO,
2082  "warning, clipping %d dct coefficients to %d..%d\n",
2083  overflow, minlevel, maxlevel);
2084 }
2085 
/**
 * Compute a perceptual weight for every coefficient position of an 8x8
 * block from the local variance of the source pixels, using a 3x3
 * neighbourhood clipped at the block border.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        const int y_lo = FFMAX(y - 1, 0);
        const int y_hi = FFMIN(y + 2, 8);

        for (int x = 0; x < 8; x++) {
            const int x_lo = FFMAX(x - 1, 0);
            const int x_hi = FFMIN(x + 2, 8);
            int sum = 0, sqr = 0, count = 0;

            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            // 36 * sqrt(count*sqr - sum^2) / count — same formula as before
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2109 
/* Encode one macroblock: pick qscale (adaptive quant), fetch source pixels
 * (edge-emulated at frame borders), build DCT input blocks (intra pixels or
 * inter residual after motion compensation), optionally decide interlaced
 * DCT, quantize, run coefficient elimination, then emit the codec-specific
 * bitstream for this MB.
 * NOTE(review): this listing was extracted from generated documentation; the
 * opening signature line (presumably
 * "static av_always_inline void encode_mb_internal(MpegEncContext *s,")
 * and a few hyperlinked lines inside (e.g. a CHROMA_444 condition and the
 * MPEG-1/2 case labels) were lost — confirm against the original file. */
2111  int motion_x, int motion_y,
2112  int mb_block_height,
2113  int mb_block_width,
2114  int mb_block_count,
2115  int chroma_x_shift,
2116  int chroma_y_shift,
2117  int chroma_format)
2118 {
2119 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2120  * and neither of these encoders currently supports 444. */
2121 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2122  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2123  int16_t weight[12][64];
2124  int16_t orig[12][64];
2125  const int mb_x = s->mb_x;
2126  const int mb_y = s->mb_y;
2127  int i;
2128  int skip_dct[12];
2129  int dct_offset = s->linesize * 8; // default for progressive frames
2130  int uv_dct_offset = s->uvlinesize * 8;
2131  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2132  ptrdiff_t wrap_y, wrap_c;
2133 
2134  for (i = 0; i < mb_block_count; i++)
2135  skip_dct[i] = s->skipdct;
2136 
 /* Per-MB quantizer selection when adaptive quantization is enabled;
  * dquant is clamped/zeroed where the codec syntax restricts it. */
2137  if (s->adaptive_quant) {
2138  const int last_qp = s->qscale;
2139  const int mb_xy = mb_x + mb_y * s->mb_stride;
2140 
2141  s->lambda = s->lambda_table[mb_xy];
2142  update_qscale(s);
2143 
2144  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2145  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2146  s->dquant = s->qscale - last_qp;
2147 
2148  if (s->out_format == FMT_H263) {
2149  s->dquant = av_clip(s->dquant, -2, 2);
2150 
2151  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2152  if (!s->mb_intra) {
2153  if (s->pict_type == AV_PICTURE_TYPE_B) {
2154  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2155  s->dquant = 0;
2156  }
2157  if (s->mv_type == MV_TYPE_8X8)
2158  s->dquant = 0;
2159  }
2160  }
2161  }
2162  }
2163  ff_set_qscale(s, last_qp + s->dquant);
2164  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2165  ff_set_qscale(s, s->qscale + s->dquant);
2166 
 /* Source pixel pointers for this MB in the frame to encode. */
2167  wrap_y = s->linesize;
2168  wrap_c = s->uvlinesize;
2169  ptr_y = s->new_picture->data[0] +
2170  (mb_y * 16 * wrap_y) + mb_x * 16;
2171  ptr_cb = s->new_picture->data[1] +
2172  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2173  ptr_cr = s->new_picture->data[2] +
2174  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2175 
 /* MB sticks out of the frame: replicate edges into a scratch buffer. */
2176  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2177  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2178  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2179  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2180  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2181  wrap_y, wrap_y,
2182  16, 16, mb_x * 16, mb_y * 16,
2183  s->width, s->height);
2184  ptr_y = ebuf;
2185  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2186  wrap_c, wrap_c,
2187  mb_block_width, mb_block_height,
2188  mb_x * mb_block_width, mb_y * mb_block_height,
2189  cw, ch);
2190  ptr_cb = ebuf + 16 * wrap_y;
2191  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2192  wrap_c, wrap_c,
2193  mb_block_width, mb_block_height,
2194  mb_x * mb_block_width, mb_y * mb_block_height,
2195  cw, ch);
2196  ptr_cr = ebuf + 16 * wrap_y + 16;
2197  }
2198 
 /* Intra MB: DCT input is the source pixels themselves. */
2199  if (s->mb_intra) {
2200  if (INTERLACED_DCT(s)) {
2201  int progressive_score, interlaced_score;
2202 
 /* Compare field vs frame coding cost of the luma; -400 biases
  * towards progressive. NOTE(review): a condition line (2220,
  * presumably another chroma_format test) is missing below. */
2203  s->interlaced_dct = 0;
2204  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2205  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2206  NULL, wrap_y, 8) - 400;
2207 
2208  if (progressive_score > 0) {
2209  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2210  NULL, wrap_y * 2, 8) +
2211  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2212  NULL, wrap_y * 2, 8);
2213  if (progressive_score > interlaced_score) {
2214  s->interlaced_dct = 1;
2215 
2216  dct_offset = wrap_y;
2217  uv_dct_offset = wrap_c;
2218  wrap_y <<= 1;
2219  if (chroma_format == CHROMA_422 ||
2221  wrap_c <<= 1;
2222  }
2223  }
2224  }
2225 
2226  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2227  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2228  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2229  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2230 
2231  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2232  skip_dct[4] = 1;
2233  skip_dct[5] = 1;
2234  } else {
2235  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2236  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2237  if (chroma_format == CHROMA_422) {
2238  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2239  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2240  } else if (chroma_format == CHROMA_444) {
2241  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2242  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2243  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2244  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2245  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2246  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2247  }
2248  }
 /* Inter MB: motion-compensate into s->dest, then DCT the residual. */
2249  } else {
2250  op_pixels_func (*op_pix)[4];
2251  qpel_mc_func (*op_qpix)[16];
2252  uint8_t *dest_y, *dest_cb, *dest_cr;
2253 
2254  dest_y = s->dest[0];
2255  dest_cb = s->dest[1];
2256  dest_cr = s->dest[2];
2257 
2258  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2259  op_pix = s->hdsp.put_pixels_tab;
2260  op_qpix = s->qdsp.put_qpel_pixels_tab;
2261  } else {
2262  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2263  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2264  }
2265 
2266  if (s->mv_dir & MV_DIR_FORWARD) {
2267  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2268  s->last_picture.f->data,
2269  op_pix, op_qpix);
2270  op_pix = s->hdsp.avg_pixels_tab;
2271  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2272  }
2273  if (s->mv_dir & MV_DIR_BACKWARD) {
2274  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2275  s->next_picture.f->data,
2276  op_pix, op_qpix);
2277  }
2278 
2279  if (INTERLACED_DCT(s)) {
2280  int progressive_score, interlaced_score;
2281 
2282  s->interlaced_dct = 0;
2283  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2284  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2285  ptr_y + wrap_y * 8,
2286  wrap_y, 8) - 400;
2287 
2288  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2289  progressive_score -= 400;
2290 
2291  if (progressive_score > 0) {
2292  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2293  wrap_y * 2, 8) +
2294  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2295  ptr_y + wrap_y,
2296  wrap_y * 2, 8);
2297 
2298  if (progressive_score > interlaced_score) {
2299  s->interlaced_dct = 1;
2300 
2301  dct_offset = wrap_y;
2302  uv_dct_offset = wrap_c;
2303  wrap_y <<= 1;
2304  if (chroma_format == CHROMA_422)
2305  wrap_c <<= 1;
2306  }
2307  }
2308  }
2309 
2310  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2311  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2312  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2313  dest_y + dct_offset, wrap_y);
2314  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2315  dest_y + dct_offset + 8, wrap_y);
2316 
2317  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2318  skip_dct[4] = 1;
2319  skip_dct[5] = 1;
2320  } else {
2321  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2322  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2323  if (!chroma_y_shift) { /* 422 */
2324  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2325  dest_cb + uv_dct_offset, wrap_c);
2326  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2327  dest_cr + uv_dct_offset, wrap_c);
2328  }
2329  }
2330  /* pre quantization */
 /* Low-variance MB: skip DCT for blocks whose SAD vs the prediction
  * is already below a qscale-dependent threshold. */
2331  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2332  // FIXME optimize
2333  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2334  skip_dct[0] = 1;
2335  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2336  skip_dct[1] = 1;
2337  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2338  wrap_y, 8) < 20 * s->qscale)
2339  skip_dct[2] = 1;
2340  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2341  wrap_y, 8) < 20 * s->qscale)
2342  skip_dct[3] = 1;
2343  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2344  skip_dct[4] = 1;
2345  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2346  skip_dct[5] = 1;
2347  if (!chroma_y_shift) { /* 422 */
2348  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2349  dest_cb + uv_dct_offset,
2350  wrap_c, 8) < 20 * s->qscale)
2351  skip_dct[6] = 1;
2352  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2353  dest_cr + uv_dct_offset,
2354  wrap_c, 8) < 20 * s->qscale)
2355  skip_dct[7] = 1;
2356  }
2357  }
2358  }
2359 
 /* Noise shaping: keep per-block visual weights and the unquantized
  * coefficients for the later refinement pass. */
2360  if (s->quantizer_noise_shaping) {
2361  if (!skip_dct[0])
2362  get_visual_weight(weight[0], ptr_y , wrap_y);
2363  if (!skip_dct[1])
2364  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2365  if (!skip_dct[2])
2366  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2367  if (!skip_dct[3])
2368  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2369  if (!skip_dct[4])
2370  get_visual_weight(weight[4], ptr_cb , wrap_c);
2371  if (!skip_dct[5])
2372  get_visual_weight(weight[5], ptr_cr , wrap_c);
2373  if (!chroma_y_shift) { /* 422 */
2374  if (!skip_dct[6])
2375  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2376  wrap_c);
2377  if (!skip_dct[7])
2378  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2379  wrap_c);
2380  }
2381  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2382  }
2383 
2384  /* DCT & quantize */
2385  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2386  {
2387  for (i = 0; i < mb_block_count; i++) {
2388  if (!skip_dct[i]) {
2389  int overflow;
2390  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2391  // FIXME we could decide to change to quantizer instead of
2392  // clipping
2393  // JS: I don't think that would be a good idea it could lower
2394  // quality instead of improve it. Just INTRADC clipping
2395  // deserves changes in quantizer
2396  if (overflow)
2397  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2398  } else
2399  s->block_last_index[i] = -1;
2400  }
2401  if (s->quantizer_noise_shaping) {
2402  for (i = 0; i < mb_block_count; i++) {
2403  if (!skip_dct[i]) {
2404  s->block_last_index[i] =
2405  dct_quantize_refine(s, s->block[i], weight[i],
2406  orig[i], i, s->qscale);
2407  }
2408  }
2409  }
2410 
2411  if (s->luma_elim_threshold && !s->mb_intra)
2412  for (i = 0; i < 4; i++)
2413  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2414  if (s->chroma_elim_threshold && !s->mb_intra)
2415  for (i = 4; i < mb_block_count; i++)
2416  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2417 
2418  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2419  for (i = 0; i < mb_block_count; i++) {
2420  if (s->block_last_index[i] == -1)
2421  s->coded_score[i] = INT_MAX / 256;
2422  }
2423  }
2424  }
2425 
 /* Gray flag: force chroma blocks to a flat mid-gray DC-only value. */
2426  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2427  s->block_last_index[4] =
2428  s->block_last_index[5] = 0;
2429  s->block[4][0] =
2430  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2431  if (!chroma_y_shift) { /* 422 / 444 */
2432  for (i=6; i<12; i++) {
2433  s->block_last_index[i] = 0;
2434  s->block[i][0] = s->block[4][0];
2435  }
2436  }
2437  }
2438 
2439  // non c quantize code returns incorrect block_last_index FIXME
2440  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2441  for (i = 0; i < mb_block_count; i++) {
2442  int j;
2443  if (s->block_last_index[i] > 0) {
2444  for (j = 63; j > 0; j--) {
2445  if (s->block[i][s->intra_scantable.permutated[j]])
2446  break;
2447  }
2448  s->block_last_index[i] = j;
2449  }
2450  }
2451  }
2452 
2453  /* huffman encode */
 /* NOTE(review): the MPEG-1/MPEG-2 case labels (doc lines 2455-2456)
  * were lost in extraction; the first branch below belongs to them. */
2454  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2457  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2458  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2459  break;
2460  case AV_CODEC_ID_MPEG4:
2461  if (CONFIG_MPEG4_ENCODER)
2462  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2463  break;
2464  case AV_CODEC_ID_MSMPEG4V2:
2465  case AV_CODEC_ID_MSMPEG4V3:
2466  case AV_CODEC_ID_WMV1:
2467  if (CONFIG_MSMPEG4ENC)
2468  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2469  break;
2470  case AV_CODEC_ID_WMV2:
2471  if (CONFIG_WMV2_ENCODER)
2472  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2473  break;
2474  case AV_CODEC_ID_H261:
2475  if (CONFIG_H261_ENCODER)
2476  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2477  break;
2478  case AV_CODEC_ID_H263:
2479  case AV_CODEC_ID_H263P:
2480  case AV_CODEC_ID_FLV1:
2481  case AV_CODEC_ID_RV10:
2482  case AV_CODEC_ID_RV20:
2483  if (CONFIG_H263_ENCODER)
2484  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2485  break;
2486 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2487  case AV_CODEC_ID_MJPEG:
2488  case AV_CODEC_ID_AMV:
2489  ff_mjpeg_encode_mb(s, s->block);
2490  break;
2491 #endif
2492  case AV_CODEC_ID_SPEEDHQ:
2493  if (CONFIG_SPEEDHQ_ENCODER)
2494  ff_speedhq_encode_mb(s, s->block);
2495  break;
2496  default:
2497  av_assert1(0);
2498  }
2499 }
2500 
2501 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2502 {
2503  if (s->chroma_format == CHROMA_420)
2504  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2505  else if (s->chroma_format == CHROMA_422)
2506  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2507  else
2508  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2509 }
2510 
/* Snapshot the encoder state that a trial macroblock encode mutates, so the
 * RD loop in encode_thread() can restore it before trying another MB mode.
 * NOTE(review): the first signature line was lost in extraction — presumably
 * "static void copy_context_before_encode(MpegEncContext *d," — confirm
 * against the original file. */
2512  const MpegEncContext *s)
2513 {
2514  int i;
2515 
2516  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2517 
2518  /* MPEG-1 */
2519  d->mb_skip_run= s->mb_skip_run;
2520  for(i=0; i<3; i++)
2521  d->last_dc[i] = s->last_dc[i];
2522 
2523  /* statistics */
2524  d->mv_bits= s->mv_bits;
2525  d->i_tex_bits= s->i_tex_bits;
2526  d->p_tex_bits= s->p_tex_bits;
2527  d->i_count= s->i_count;
2528  d->skip_count= s->skip_count;
2529  d->misc_bits= s->misc_bits;
2530  d->last_bits= 0;
2531 
 /* These two are reset, not copied: each trial starts unskipped. */
2532  d->mb_skipped= 0;
2533  d->qscale= s->qscale;
2534  d->dquant= s->dquant;
2535 
2536  d->esc3_level_length= s->esc3_level_length;
2537 }
2538 
/* Copy the post-encode state of the winning trial (motion vectors, DC
 * predictors, bit statistics, bitstream writers, block data) into d; the
 * counterpart of copy_context_before_encode().
 * NOTE(review): the first signature line was lost in extraction — presumably
 * "static void copy_context_after_encode(MpegEncContext *d," — confirm
 * against the original file. */
2540  const MpegEncContext *s)
2541 {
2542  int i;
2543 
2544  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2545  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2546 
2547  /* MPEG-1 */
2548  d->mb_skip_run= s->mb_skip_run;
2549  for(i=0; i<3; i++)
2550  d->last_dc[i] = s->last_dc[i];
2551 
2552  /* statistics */
2553  d->mv_bits= s->mv_bits;
2554  d->i_tex_bits= s->i_tex_bits;
2555  d->p_tex_bits= s->p_tex_bits;
2556  d->i_count= s->i_count;
2557  d->skip_count= s->skip_count;
2558  d->misc_bits= s->misc_bits;
2559 
2560  d->mb_intra= s->mb_intra;
2561  d->mb_skipped= s->mb_skipped;
2562  d->mv_type= s->mv_type;
2563  d->mv_dir= s->mv_dir;
 /* The PutBitContexts are copied by value: d then points at the same
  * trial buffers s was writing into. */
2564  d->pb= s->pb;
2565  if(s->data_partitioning){
2566  d->pb2= s->pb2;
2567  d->tex_pb= s->tex_pb;
2568  }
2569  d->block= s->block;
2570  for(i=0; i<8; i++)
2571  d->block_last_index[i]= s->block_last_index[i];
2572  d->interlaced_dct= s->interlaced_dct;
2573  d->qscale= s->qscale;
2574 
2575  d->esc3_level_length= s->esc3_level_length;
2576 }
2577 
/* Trial-encode one macroblock with the current mode settings into one of two
 * ping-pong bit buffers, compute its (rate or rate-distortion) score, and
 * keep it as the new best if it beats *dmin.
 * NOTE(review): a parameter line (doc line 2579, presumably the
 * "PutBitContext pb[2], pb2[2], tex_pb[2]," arrays) and the call recording
 * the winner (doc line 2625, presumably "copy_context_after_encode(best, s)")
 * were lost in extraction — confirm against the original file. */
2578 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best,
2580  int *dmin, int *next_block, int motion_x, int motion_y)
2581 {
2582  int score;
2583  uint8_t *dest_backup[3];
2584 
2585  copy_context_before_encode(s, backup);
2586 
 /* Write the trial into the buffer NOT holding the current best. */
2587  s->block= s->blocks[*next_block];
2588  s->pb= pb[*next_block];
2589  if(s->data_partitioning){
2590  s->pb2 = pb2 [*next_block];
2591  s->tex_pb= tex_pb[*next_block];
2592  }
2593 
2594  if(*next_block){
 /* Reconstruct into a scratchpad so the best trial's pixels in
  * s->dest are not clobbered. */
2595  memcpy(dest_backup, s->dest, sizeof(s->dest));
2596  s->dest[0] = s->sc.rd_scratchpad;
2597  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2598  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2599  av_assert0(s->linesize >= 32); //FIXME
2600  }
2601 
2602  encode_mb(s, motion_x, motion_y);
2603 
 /* Rate in bits; upgraded to lambda2*rate + SSE distortion for full RD. */
2604  score= put_bits_count(&s->pb);
2605  if(s->data_partitioning){
2606  score+= put_bits_count(&s->pb2);
2607  score+= put_bits_count(&s->tex_pb);
2608  }
2609 
2610  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2611  mpv_reconstruct_mb(s, s->block);
2612 
2613  score *= s->lambda2;
2614  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2615  }
2616 
2617  if(*next_block){
2618  memcpy(s->dest, dest_backup, sizeof(s->dest));
2619  }
2620 
2621  if(score<*dmin){
2622  *dmin= score;
 /* Winner stays in its buffer; next trial uses the other one. */
2623  *next_block^=1;
2624 
2626  }
2627 }
2628 
2629 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2630  const uint32_t *sq = ff_square_tab + 256;
2631  int acc=0;
2632  int x,y;
2633 
2634  if(w==16 && h==16)
2635  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2636  else if(w==8 && h==8)
2637  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2638 
2639  for(y=0; y<h; y++){
2640  for(x=0; x<w; x++){
2641  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2642  }
2643  }
2644 
2645  av_assert2(acc>=0);
2646 
2647  return acc;
2648 }
2649 
2650 static int sse_mb(MpegEncContext *s){
2651  int w= 16;
2652  int h= 16;
2653  int chroma_mb_w = w >> s->chroma_x_shift;
2654  int chroma_mb_h = h >> s->chroma_y_shift;
2655 
2656  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2657  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2658 
2659  if(w==16 && h==16)
2660  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2661  return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2662  s->dest[0], s->linesize, 16) +
2663  s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2664  s->dest[1], s->uvlinesize, chroma_mb_h) +
2665  s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2666  s->dest[2], s->uvlinesize, chroma_mb_h);
2667  }else{
2668  return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2669  s->dest[0], s->linesize, 16) +
2670  s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2671  s->dest[1], s->uvlinesize, chroma_mb_h) +
2672  s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2673  s->dest[2], s->uvlinesize, chroma_mb_h);
2674  }
2675  else
2676  return sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2677  s->dest[0], w, h, s->linesize) +
2678  sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2679  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2680  sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2681  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2682 }
2683 
/* Slice worker: motion-estimation pre-pass, scanning macroblocks in reverse
 * raster order over this context's slice of rows.
 * NOTE(review): the signature line was lost in extraction — presumably
 * "static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){"
 * — confirm against the original file. */
2685  MpegEncContext *s= *(void**)arg;
2686 
2687 
2688  s->me.pre_pass=1;
2689  s->me.dia_size= s->avctx->pre_dia_size;
2690  s->first_slice_line=1;
 /* Reverse scan: bottom-right to top-left of the slice. */
2691  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2692  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2693  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2694  }
2695  s->first_slice_line=0;
2696  }
2697 
2698  s->me.pre_pass=0;
2699 
2700  return 0;
2701 }
2702 
/* Slice worker: main motion-estimation pass; per macroblock it runs B- or
 * P-frame estimation and stores the chosen vectors/mb_type in the context.
 * NOTE(review): the signature line and one call between doc lines 2709 and
 * 2711 (presumably "ff_init_block_index(s);") were lost in extraction —
 * confirm against the original file. */
2704  MpegEncContext *s= *(void**)arg;
2705 
2706  s->me.dia_size= s->avctx->dia_size;
2707  s->first_slice_line=1;
2708  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2709  s->mb_x=0; //for block init below
2711  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
 /* advance the per-plane block indices one MB to the right */
2712  s->block_index[0]+=2;
2713  s->block_index[1]+=2;
2714  s->block_index[2]+=2;
2715  s->block_index[3]+=2;
2716 
2717  /* compute motion vector & mb_type and store in context */
2718  if(s->pict_type==AV_PICTURE_TYPE_B)
2719  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2720  else
2721  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2722  }
2723  s->first_slice_line=0;
2724  }
2725  return 0;
2726 }
2727 
2728 static int mb_var_thread(AVCodecContext *c, void *arg){
2729  MpegEncContext *s= *(void**)arg;
2730  int mb_x, mb_y;
2731 
2732  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2733  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2734  int xx = mb_x * 16;
2735  int yy = mb_y * 16;
2736  const uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
2737  int varc;
2738  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2739 
2740  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2741  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2742 
2743  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2744  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2745  s->me.mb_var_sum_temp += varc;
2746  }
2747  }
2748  return 0;
2749 }
2750 
/* Finish the current slice: codec-specific stuffing/padding, then byte-align
 * and flush the bitstream writer; updates misc_bits stats for pass-1.
 * NOTE(review): the signature line (presumably
 * "static void write_slice_end(MpegEncContext *s){") and three codec-specific
 * call lines (doc lines 2754, 2760, 2762 — presumably the MPEG-4 partition
 * merge, MJPEG stuffing and SpeedHQ slice-end calls) were lost in
 * extraction — confirm against the original file. */
2752  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2753  if(s->partitioned_frame){
2755  }
2756 
2757  ff_mpeg4_stuffing(&s->pb);
2758  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2759  s->out_format == FMT_MJPEG) {
2761  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2763  }
2764 
 /* byte-align and write out any pending bits */
2765  flush_put_bits(&s->pb);
2766 
2767  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2768  s->misc_bits+= get_bits_diff(s);
2769 }
2770 
/* Fill the most recently reserved 12-byte slot of the H.263 mb_info side
 * data: bit offset, qscale, GOB number, MB address and the predicted motion
 * vector (4MV second vector left zero).
 * NOTE(review): the signature line was lost in extraction — presumably
 * "static void write_mb_info(MpegEncContext *s)" — confirm against the
 * original file. */
2772 {
 /* point at the last reserved 12-byte record */
2773  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2774  int offset = put_bits_count(&s->pb);
2775  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2776  int gobn = s->mb_y / s->gob_index;
2777  int pred_x, pred_y;
2778  if (CONFIG_H263_ENCODER)
2779  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2780  bytestream_put_le32(&ptr, offset);
2781  bytestream_put_byte(&ptr, s->qscale);
2782  bytestream_put_byte(&ptr, gobn);
2783  bytestream_put_le16(&ptr, mba);
2784  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2785  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2786  /* 4MV not implemented */
2787  bytestream_put_byte(&ptr, 0); /* hmv2 */
2788  bytestream_put_byte(&ptr, 0); /* vmv2 */
2789 }
2790 
2791 static void update_mb_info(MpegEncContext *s, int startcode)
2792 {
2793  if (!s->mb_info)
2794  return;
2795  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2796  s->mb_info_size += 12;
2797  s->prev_mb_info = s->last_mb_info;
2798  }
2799  if (startcode) {
2800  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2801  /* This might have incremented mb_info_size above, and we return without
2802  * actually writing any info into that slot yet. But in that case,
2803  * this will be called again at the start of the after writing the
2804  * start code, actually writing the mb info. */
2805  return;
2806  }
2807 
2808  s->last_mb_info = put_bytes_count(&s->pb, 0);
2809  if (!s->mb_info_size)
2810  s->mb_info_size += 12;
2811  write_mb_info(s);
2812 }
2813 
2814 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2815 {
2816  if (put_bytes_left(&s->pb, 0) < threshold
2817  && s->slice_context_count == 1
2818  && s->pb.buf == s->avctx->internal->byte_buffer) {
2819  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2820 
2821  uint8_t *new_buffer = NULL;
2822  int new_buffer_size = 0;
2823 
2824  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2825  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2826  return AVERROR(ENOMEM);
2827  }
2828 
2829  emms_c();
2830 
2831  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2832  s->avctx->internal->byte_buffer_size + size_increase);
2833  if (!new_buffer)
2834  return AVERROR(ENOMEM);
2835 
2836  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2837  av_free(s->avctx->internal->byte_buffer);
2838  s->avctx->internal->byte_buffer = new_buffer;
2839  s->avctx->internal->byte_buffer_size = new_buffer_size;
2840  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2841  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2842  }
2843  if (put_bytes_left(&s->pb, 0) < threshold)
2844  return AVERROR(EINVAL);
2845  return 0;
2846 }
2847 
2848 static int encode_thread(AVCodecContext *c, void *arg){
2849  MpegEncContext *s= *(void**)arg;
2850  int mb_x, mb_y, mb_y_order;
2851  int chr_h= 16>>s->chroma_y_shift;
2852  int i, j;
2853  MpegEncContext best_s = { 0 }, backup_s;
2854  uint8_t bit_buf[2][MAX_MB_BYTES];
2855  uint8_t bit_buf2[2][MAX_MB_BYTES];
2856  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2857  PutBitContext pb[2], pb2[2], tex_pb[2];
2858 
2859  for(i=0; i<2; i++){
2860  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2861  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2862  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2863  }
2864 
2865  s->last_bits= put_bits_count(&s->pb);
2866  s->mv_bits=0;
2867  s->misc_bits=0;
2868  s->i_tex_bits=0;
2869  s->p_tex_bits=0;
2870  s->i_count=0;
2871  s->skip_count=0;
2872 
2873  for(i=0; i<3; i++){
2874  /* init last dc values */
2875  /* note: quant matrix value (8) is implied here */
2876  s->last_dc[i] = 128 << s->intra_dc_precision;
2877 
2878  s->encoding_error[i] = 0;
2879  }
2880  if(s->codec_id==AV_CODEC_ID_AMV){
2881  s->last_dc[0] = 128*8/13;
2882  s->last_dc[1] = 128*8/14;
2883  s->last_dc[2] = 128*8/14;
2884  }
2885  s->mb_skip_run = 0;
2886  memset(s->last_mv, 0, sizeof(s->last_mv));
2887 
2888  s->last_mv_dir = 0;
2889 
2890  switch(s->codec_id){
2891  case AV_CODEC_ID_H263:
2892  case AV_CODEC_ID_H263P:
2893  case AV_CODEC_ID_FLV1:
2894  if (CONFIG_H263_ENCODER)
2895  s->gob_index = H263_GOB_HEIGHT(s->height);
2896  break;
2897  case AV_CODEC_ID_MPEG4:
2898  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2900  break;
2901  }
2902 
2903  s->resync_mb_x=0;
2904  s->resync_mb_y=0;
2905  s->first_slice_line = 1;
2906  s->ptr_lastgob = s->pb.buf;
2907  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2908  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2909  int first_in_slice;
2910  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2911  if (first_in_slice && mb_y_order != s->start_mb_y)
2913  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2914  } else {
2915  mb_y = mb_y_order;
2916  }
2917  s->mb_x=0;
2918  s->mb_y= mb_y;
2919 
2920  ff_set_qscale(s, s->qscale);
2922 
2923  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2924  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2925  int mb_type= s->mb_type[xy];
2926 // int d;
2927  int dmin= INT_MAX;
2928  int dir;
2929  int size_increase = s->avctx->internal->byte_buffer_size/4
2930  + s->mb_width*MAX_MB_BYTES;
2931 
2933  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2934  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2935  return -1;
2936  }
2937  if(s->data_partitioning){
2938  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2939  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2940  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2941  return -1;
2942  }
2943  }
2944 
2945  s->mb_x = mb_x;
2946  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2947  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2948 
2949  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2951  xy= s->mb_y*s->mb_stride + s->mb_x;
2952  mb_type= s->mb_type[xy];
2953  }
2954 
2955  /* write gob / video packet header */
2956  if(s->rtp_mode){
2957  int current_packet_size, is_gob_start;
2958 
2959  current_packet_size = put_bytes_count(&s->pb, 1)
2960  - (s->ptr_lastgob - s->pb.buf);
2961 
2962  is_gob_start = s->rtp_payload_size &&
2963  current_packet_size >= s->rtp_payload_size &&
2964  mb_y + mb_x > 0;
2965 
2966  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2967 
2968  switch(s->codec_id){
2969  case AV_CODEC_ID_H263:
2970  case AV_CODEC_ID_H263P:
2971  if(!s->h263_slice_structured)
2972  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2973  break;
2975  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2977  if(s->mb_skip_run) is_gob_start=0;
2978  break;
2979  case AV_CODEC_ID_MJPEG:
2980  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2981  break;
2982  }
2983 
2984  if(is_gob_start){
2985  if(s->start_mb_y != mb_y || mb_x!=0){
2986  write_slice_end(s);
2987 
2988  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2990  }
2991  }
2992 
2993  av_assert2((put_bits_count(&s->pb)&7) == 0);
2994  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2995 
2996  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2997  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
2998  int d = 100 / s->error_rate;
2999  if(r % d == 0){
3000  current_packet_size=0;
3001  s->pb.buf_ptr= s->ptr_lastgob;
3002  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3003  }
3004  }
3005 
3006  switch(s->codec_id){
3007  case AV_CODEC_ID_MPEG4:
3008  if (CONFIG_MPEG4_ENCODER) {
3011  }
3012  break;
3015  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3018  }
3019  break;
3020  case AV_CODEC_ID_H263:
3021  case AV_CODEC_ID_H263P:
3022  if (CONFIG_H263_ENCODER) {
3023  update_mb_info(s, 1);
3025  }
3026  break;
3027  }
3028 
3029  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3030  int bits= put_bits_count(&s->pb);
3031  s->misc_bits+= bits - s->last_bits;
3032  s->last_bits= bits;
3033  }
3034 
3035  s->ptr_lastgob += current_packet_size;
3036  s->first_slice_line=1;
3037  s->resync_mb_x=mb_x;
3038  s->resync_mb_y=mb_y;
3039  }
3040  }
3041 
3042  if( (s->resync_mb_x == s->mb_x)
3043  && s->resync_mb_y+1 == s->mb_y){
3044  s->first_slice_line=0;
3045  }
3046 
3047  s->mb_skipped=0;
3048  s->dquant=0; //only for QP_RD
3049 
3050  update_mb_info(s, 0);
3051 
3052  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3053  int next_block=0;
3054  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3055 
3056  copy_context_before_encode(&backup_s, s);
3057  backup_s.pb= s->pb;
3058  best_s.data_partitioning= s->data_partitioning;
3059  best_s.partitioned_frame= s->partitioned_frame;
3060  if(s->data_partitioning){
3061  backup_s.pb2= s->pb2;
3062  backup_s.tex_pb= s->tex_pb;
3063  }
3064 
3066  s->mv_dir = MV_DIR_FORWARD;
3067  s->mv_type = MV_TYPE_16X16;
3068  s->mb_intra= 0;
3069  s->mv[0][0][0] = s->p_mv_table[xy][0];
3070  s->mv[0][0][1] = s->p_mv_table[xy][1];
3071  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3072  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3073  }
3075  s->mv_dir = MV_DIR_FORWARD;
3076  s->mv_type = MV_TYPE_FIELD;
3077  s->mb_intra= 0;
3078  for(i=0; i<2; i++){
3079  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3080  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3081  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3082  }
3083  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3084  &dmin, &next_block, 0, 0);
3085  }
3087  s->mv_dir = MV_DIR_FORWARD;
3088  s->mv_type = MV_TYPE_16X16;
3089  s->mb_intra= 0;
3090  s->mv[0][0][0] = 0;
3091  s->mv[0][0][1] = 0;
3092  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3093  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3094  }
3096  s->mv_dir = MV_DIR_FORWARD;
3097  s->mv_type = MV_TYPE_8X8;
3098  s->mb_intra= 0;
3099  for(i=0; i<4; i++){
3100  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3101  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3102  }
3103  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3104  &dmin, &next_block, 0, 0);
3105  }
3107  s->mv_dir = MV_DIR_FORWARD;
3108  s->mv_type = MV_TYPE_16X16;
3109  s->mb_intra= 0;
3110  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3111  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3112  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3113  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3114  }
3116  s->mv_dir = MV_DIR_BACKWARD;
3117  s->mv_type = MV_TYPE_16X16;
3118  s->mb_intra= 0;
3119  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3120  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3121  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3122  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3123  }
3125  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3126  s->mv_type = MV_TYPE_16X16;
3127  s->mb_intra= 0;
3128  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3129  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3130  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3131  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3132  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3133  &dmin, &next_block, 0, 0);
3134  }
3136  s->mv_dir = MV_DIR_FORWARD;
3137  s->mv_type = MV_TYPE_FIELD;
3138  s->mb_intra= 0;
3139  for(i=0; i<2; i++){
3140  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3141  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3142  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3143  }
3144  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3145  &dmin, &next_block, 0, 0);
3146  }
3148  s->mv_dir = MV_DIR_BACKWARD;
3149  s->mv_type = MV_TYPE_FIELD;
3150  s->mb_intra= 0;
3151  for(i=0; i<2; i++){
3152  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3153  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3154  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3155  }
3156  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3157  &dmin, &next_block, 0, 0);
3158  }
3160  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3161  s->mv_type = MV_TYPE_FIELD;
3162  s->mb_intra= 0;
3163  for(dir=0; dir<2; dir++){
3164  for(i=0; i<2; i++){
3165  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3166  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3167  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3168  }
3169  }
3170  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3171  &dmin, &next_block, 0, 0);
3172  }
3174  s->mv_dir = 0;
3175  s->mv_type = MV_TYPE_16X16;
3176  s->mb_intra= 1;
3177  s->mv[0][0][0] = 0;
3178  s->mv[0][0][1] = 0;
3179  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3180  &dmin, &next_block, 0, 0);
3181  if(s->h263_pred || s->h263_aic){
3182  if(best_s.mb_intra)
3183  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3184  else
3185  ff_clean_intra_table_entries(s); //old mode?
3186  }
3187  }
3188 
3189  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3190  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3191  const int last_qp= backup_s.qscale;
3192  int qpi, qp, dc[6];
3193  int16_t ac[6][16];
3194  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3195  static const int dquant_tab[4]={-1,1,-2,2};
3196  int storecoefs = s->mb_intra && s->dc_val[0];
3197 
3198  av_assert2(backup_s.dquant == 0);
3199 
3200  //FIXME intra
3201  s->mv_dir= best_s.mv_dir;
3202  s->mv_type = MV_TYPE_16X16;
3203  s->mb_intra= best_s.mb_intra;
3204  s->mv[0][0][0] = best_s.mv[0][0][0];
3205  s->mv[0][0][1] = best_s.mv[0][0][1];
3206  s->mv[1][0][0] = best_s.mv[1][0][0];
3207  s->mv[1][0][1] = best_s.mv[1][0][1];
3208 
3209  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3210  for(; qpi<4; qpi++){
3211  int dquant= dquant_tab[qpi];
3212  qp= last_qp + dquant;
3213  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3214  continue;
3215  backup_s.dquant= dquant;
3216  if(storecoefs){
3217  for(i=0; i<6; i++){
3218  dc[i]= s->dc_val[0][ s->block_index[i] ];
3219  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3220  }
3221  }
3222 
3223  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3224  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3225  if(best_s.qscale != qp){
3226  if(storecoefs){
3227  for(i=0; i<6; i++){
3228  s->dc_val[0][ s->block_index[i] ]= dc[i];
3229  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3230  }
3231  }
3232  }
3233  }
3234  }
3235  }
3236  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3237  int mx= s->b_direct_mv_table[xy][0];
3238  int my= s->b_direct_mv_table[xy][1];
3239 
3240  backup_s.dquant = 0;
3241  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3242  s->mb_intra= 0;
3243  ff_mpeg4_set_direct_mv(s, mx, my);
3244  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3245  &dmin, &next_block, mx, my);
3246  }
3247  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3248  backup_s.dquant = 0;
3249  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3250  s->mb_intra= 0;
3251  ff_mpeg4_set_direct_mv(s, 0, 0);
3252  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3253  &dmin, &next_block, 0, 0);
3254  }
3255  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3256  int coded=0;
3257  for(i=0; i<6; i++)
3258  coded |= s->block_last_index[i];
3259  if(coded){
3260  int mx,my;
3261  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3262  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3263  mx=my=0; //FIXME find the one we actually used
3264  ff_mpeg4_set_direct_mv(s, mx, my);
3265  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3266  mx= s->mv[1][0][0];
3267  my= s->mv[1][0][1];
3268  }else{
3269  mx= s->mv[0][0][0];
3270  my= s->mv[0][0][1];
3271  }
3272 
3273  s->mv_dir= best_s.mv_dir;
3274  s->mv_type = best_s.mv_type;
3275  s->mb_intra= 0;
3276 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3277  s->mv[0][0][1] = best_s.mv[0][0][1];
3278  s->mv[1][0][0] = best_s.mv[1][0][0];
3279  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3280  backup_s.dquant= 0;
3281  s->skipdct=1;
3282  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3283  &dmin, &next_block, mx, my);
3284  s->skipdct=0;
3285  }
3286  }
3287 
3288  s->current_picture.qscale_table[xy] = best_s.qscale;
3289 
3290  copy_context_after_encode(s, &best_s);
3291 
3292  pb_bits_count= put_bits_count(&s->pb);
3293  flush_put_bits(&s->pb);
3294  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3295  s->pb= backup_s.pb;
3296 
3297  if(s->data_partitioning){
3298  pb2_bits_count= put_bits_count(&s->pb2);
3299  flush_put_bits(&s->pb2);
3300  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3301  s->pb2= backup_s.pb2;
3302 
3303  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3304  flush_put_bits(&s->tex_pb);
3305  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3306  s->tex_pb= backup_s.tex_pb;
3307  }
3308  s->last_bits= put_bits_count(&s->pb);
3309 
3310  if (CONFIG_H263_ENCODER &&
3311  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3313 
3314  if(next_block==0){ //FIXME 16 vs linesize16
3315  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3316  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3317  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3318  }
3319 
3320  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3321  mpv_reconstruct_mb(s, s->block);
3322  } else {
3323  int motion_x = 0, motion_y = 0;
3324  s->mv_type=MV_TYPE_16X16;
3325  // only one MB-Type possible
3326 
3327  switch(mb_type){
3329  s->mv_dir = 0;
3330  s->mb_intra= 1;
3331  motion_x= s->mv[0][0][0] = 0;
3332  motion_y= s->mv[0][0][1] = 0;
3333  break;
3335  s->mv_dir = MV_DIR_FORWARD;
3336  s->mb_intra= 0;
3337  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3338  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3339  break;
3341  s->mv_dir = MV_DIR_FORWARD;
3342  s->mv_type = MV_TYPE_FIELD;
3343  s->mb_intra= 0;
3344  for(i=0; i<2; i++){
3345  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3346  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3347  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3348  }
3349  break;
3351  s->mv_dir = MV_DIR_FORWARD;
3352  s->mv_type = MV_TYPE_8X8;
3353  s->mb_intra= 0;
3354  for(i=0; i<4; i++){
3355  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3356  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3357  }
3358  break;
3360  if (CONFIG_MPEG4_ENCODER) {
3362  s->mb_intra= 0;
3363  motion_x=s->b_direct_mv_table[xy][0];
3364  motion_y=s->b_direct_mv_table[xy][1];
3365  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3366  }
3367  break;
3369  if (CONFIG_MPEG4_ENCODER) {
3371  s->mb_intra= 0;
3372  ff_mpeg4_set_direct_mv(s, 0, 0);
3373  }
3374  break;
3376  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3377  s->mb_intra= 0;
3378  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3379  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3380  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3381  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3382  break;
3384  s->mv_dir = MV_DIR_BACKWARD;
3385  s->mb_intra= 0;
3386  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3387  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3388  break;
3390  s->mv_dir = MV_DIR_FORWARD;
3391  s->mb_intra= 0;
3392  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3393  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3394  break;
3396  s->mv_dir = MV_DIR_FORWARD;
3397  s->mv_type = MV_TYPE_FIELD;
3398  s->mb_intra= 0;
3399  for(i=0; i<2; i++){
3400  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3401  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3402  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3403  }
3404  break;
3406  s->mv_dir = MV_DIR_BACKWARD;
3407  s->mv_type = MV_TYPE_FIELD;
3408  s->mb_intra= 0;
3409  for(i=0; i<2; i++){
3410  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3411  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3412  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3413  }
3414  break;
3416  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3417  s->mv_type = MV_TYPE_FIELD;
3418  s->mb_intra= 0;
3419  for(dir=0; dir<2; dir++){
3420  for(i=0; i<2; i++){
3421  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3422  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3423  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3424  }
3425  }
3426  break;
3427  default:
3428  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3429  }
3430 
3431  encode_mb(s, motion_x, motion_y);
3432 
3433  // RAL: Update last macroblock type
3434  s->last_mv_dir = s->mv_dir;
3435 
3436  if (CONFIG_H263_ENCODER &&
3437  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3439 
3440  mpv_reconstruct_mb(s, s->block);
3441  }
3442 
3443  /* clean the MV table in IPS frames for direct mode in B-frames */
3444  if(s->mb_intra /* && I,P,S_TYPE */){
3445  s->p_mv_table[xy][0]=0;
3446  s->p_mv_table[xy][1]=0;
3447  }
3448 
3449  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3450  int w= 16;
3451  int h= 16;
3452 
3453  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3454  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3455 
3456  s->encoding_error[0] += sse(
3457  s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3458  s->dest[0], w, h, s->linesize);
3459  s->encoding_error[1] += sse(
3460  s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3461  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3462  s->encoding_error[2] += sse(
3463  s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3464  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3465  }
3466  if(s->loop_filter){
3467  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3469  }
3470  ff_dlog(s->avctx, "MB %d %d bits\n",
3471  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3472  }
3473  }
3474 
3475  //not beautiful here but we must write it before flushing so it has to be here
3476  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3478 
3479  write_slice_end(s);
3480 
3481  return 0;
3482 }
3483 
/* Merge one accumulated statistic from the slice-thread context `src`
 * into the main context `dst`, then clear it in `src` so a repeated
 * merge cannot double-count.  Wrapped in do { } while (0) so that both
 * statements stay together when the macro is used as the body of an
 * unbraced if/else. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
/* Fold the per-slice-thread motion-estimation statistics into the main
 * context (dst += src, then src is cleared by the MERGE macro).
 * NOTE(review): the declaration line of this function (orig line 3485,
 * presumably merge_context_after_me(MpegEncContext *dst, MpegEncContext
 * *src)) appears to have been dropped by the source extraction --
 * confirm against the original file. */
3486  MERGE(me.scene_change_score);
3487  MERGE(me.mc_mb_var_sum_temp);
3488  MERGE(me.mb_var_sum_temp);
3489 }
3490 
/* Fold the per-slice-thread encoding statistics into the main context
 * and append the thread's bitstream to the main bitstream.
 * NOTE(review): the declaration line of this function (orig line 3491,
 * presumably merge_context_after_encode(MpegEncContext *dst,
 * MpegEncContext *src)) appears to have been dropped by the source
 * extraction -- confirm against the original file. */
3492  int i;
3493 
     /* Bit and block counters: dst += src, src cleared (MERGE macro). */
3494  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3495  MERGE(dct_count[1]);
3496  MERGE(mv_bits);
3497  MERGE(i_tex_bits);
3498  MERGE(p_tex_bits);
3499  MERGE(i_count);
3500  MERGE(skip_count);
3501  MERGE(misc_bits);
3502  MERGE(encoding_error[0]);
3503  MERGE(encoding_error[1]);
3504  MERGE(encoding_error[2]);
3505 
     /* The DCT error sums only exist when noise reduction is enabled. */
3506  if (dst->noise_reduction){
3507  for(i=0; i<64; i++){
3508  MERGE(dct_error_sum[0][i]);
3509  MERGE(dct_error_sum[1][i]);
3510  }
3511  }
3512 
     /* Both bitstreams must be byte-aligned before src's bits can be
      * appended verbatim behind dst's. */
3513  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3514  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3515  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3516  flush_put_bits(&dst->pb);
3517 }
3518 
/* Choose the quantiser (lambda / qscale) for the current picture.
 *
 * If a lambda was pre-selected (s->next_lambda), it is used directly;
 * otherwise, unless a fixed qscale is forced, rate control picks the
 * quality.  With adaptive quantisation enabled, a per-MB lambda table
 * is set up (codec-specific) and lambda is taken from its first entry.
 *
 * @param s       encoder context
 * @param dry_run non-zero: estimate only, keep s->next_lambda intact
 * @return 0 on success, -1 if rate control yields a negative quality
 */
3519 static int estimate_qp(MpegEncContext *s, int dry_run){
3520  if (s->next_lambda){
3521  s->current_picture_ptr->f->quality =
3522  s->current_picture.f->quality = s->next_lambda;
3523  if(!dry_run) s->next_lambda= 0;
3524  } else if (!s->fixed_qscale) {
3525  int quality = ff_rate_estimate_qscale(s, dry_run);
3526  s->current_picture_ptr->f->quality =
3527  s->current_picture.f->quality = quality;
3528  if (s->current_picture.f->quality < 0)
3529  return -1;
3530  }
3531 
3532  if(s->adaptive_quant){
3533  switch(s->codec_id){
3534  case AV_CODEC_ID_MPEG4:
3535  if (CONFIG_MPEG4_ENCODER)
     /* NOTE(review): the guarded call on orig line 3536 was dropped by
      * the extraction -- presumably ff_clean_mpeg4_qscales(s); confirm
      * against the original file. */
3537  break;
3538  case AV_CODEC_ID_H263:
3539  case AV_CODEC_ID_H263P:
3540  case AV_CODEC_ID_FLV1:
3541  if (CONFIG_H263_ENCODER)
     /* NOTE(review): the guarded call on orig line 3542 was dropped --
      * presumably ff_clean_h263_qscales(s); confirm upstream. */
3543  break;
3544  default:
     /* NOTE(review): the default-case statement on orig line 3545 was
      * dropped -- presumably ff_init_qscale_tab(s); confirm upstream. */
3546  }
3547 
3548  s->lambda= s->lambda_table[0];
3549  //FIXME broken
3550  }else
3551  s->lambda = s->current_picture.f->quality;
3552  update_qscale(s);
3553  return 0;
3554 }
3555 
3556 /* must be called before writing the header */
/* Update the temporal-distance state used for B-frame prediction:
 * for a non-B frame, pp_time becomes the distance to the previous
 * non-B frame; for a B frame, pb_time is derived so that
 * pb_time + (last_non_b_time - time) == pp_time.
 * NOTE(review): the declaration line of this function (orig line 3557,
 * presumably set_frame_distances(MpegEncContext *s)) appears to have
 * been dropped by the source extraction -- confirm against the
 * original file. */
3558  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
     /* current picture's time in time_base.num units */
3559  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3560 
3561  if(s->pict_type==AV_PICTURE_TYPE_B){
3562  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3563  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3564  }else{
3565  s->pp_time= s->time - s->last_non_b_time;
3566  s->last_non_b_time= s->time;
3567  av_assert1(s->picture_number==0 || s->pp_time > 0);
3568  }
3569 }
3570 
3572 {
3573  int i, ret;
3574  int bits;
3575  int context_count = s->slice_context_count;
3576 
3577  /* Reset the average MB variance */
3578  s->me.mb_var_sum_temp =
3579  s->me.mc_mb_var_sum_temp = 0;
3580 
3581  /* we need to initialize some time vars before we can encode B-frames */
3582  // RAL: Condition added for MPEG1VIDEO
3583  if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
3585  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3587 
3588  s->me.scene_change_score=0;
3589 
3590 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3591 
3592  if(s->pict_type==AV_PICTURE_TYPE_I){
3593  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3594  else s->no_rounding=0;
3595  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3596  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3597  s->no_rounding ^= 1;
3598  }
3599 
3600  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3601  if (estimate_qp(s,1) < 0)
3602  return -1;
3604  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3605  if(s->pict_type==AV_PICTURE_TYPE_B)
3606  s->lambda= s->last_lambda_for[s->pict_type];
3607  else
3608  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3609  update_qscale(s);
3610  }
3611 
3612  if (s->out_format != FMT_MJPEG) {
3613  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3614  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3615  s->q_chroma_intra_matrix = s->q_intra_matrix;
3616  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3617  }
3618 
3619  s->mb_intra=0; //for the rate distortion & bit compare functions
3620  for(i=1; i<context_count; i++){
3621  ret = ff_update_duplicate_context(s->thread_context[i], s);
3622  if (ret < 0)
3623  return ret;
3624  }
3625 
3626  if(ff_init_me(s)<0)
3627  return -1;
3628 
3629  /* Estimate motion for every MB */
3630  if(s->pict_type != AV_PICTURE_TYPE_I){
3631  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3632  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3633  if (s->pict_type != AV_PICTURE_TYPE_B) {
3634  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3635  s->me_pre == 2) {
3636  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3637  }
3638  }
3639 
3640  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3641  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3642  /* I-Frame */
3643  for(i=0; i<s->mb_stride*s->mb_height; i++)
3644  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3645 
3646  if(!s->fixed_qscale){
3647  /* finding spatial complexity for I-frame rate control */
3648  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3649  }
3650  }
3651  for(i=1; i<context_count; i++){
3652  merge_context_after_me(s, s->thread_context[i]);
3653  }
3654  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3655  s->mb_var_sum = s->me. mb_var_sum_temp;
3656  emms_c();
3657 
3658  if (s->me.scene_change_score > s->scenechange_threshold &&
3659  s->pict_type == AV_PICTURE_TYPE_P) {
3660  s->pict_type= AV_PICTURE_TYPE_I;
3661  for(i=0; i<s->mb_stride*s->mb_height; i++)
3662  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3663  if(s->msmpeg4_version >= 3)
3664  s->no_rounding=1;
3665  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3666  s->mb_var_sum, s->mc_mb_var_sum);
3667  }
3668 
3669  if(!s->umvplus){
3670  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3671  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3672 
3673  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3674  int a,b;
3675  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3676  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3677  s->f_code= FFMAX3(s->f_code, a, b);
3678  }
3679 
3681  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3682  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3683  int j;
3684  for(i=0; i<2; i++){
3685  for(j=0; j<2; j++)
3686  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3687  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3688  }
3689  }
3690  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3691  int a, b;
3692 
3693  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3694  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3695  s->f_code = FFMAX(a, b);
3696 
3697  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3698  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3699  s->b_code = FFMAX(a, b);
3700 
3701  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3702  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3703  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3704  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3705  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3706  int dir, j;
3707  for(dir=0; dir<2; dir++){
3708  for(i=0; i<2; i++){
3709  for(j=0; j<2; j++){
3712  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3713  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3714  }
3715  }
3716  }
3717  }
3718  }
3719  }
3720 
3721  if (estimate_qp(s, 0) < 0)
3722  return -1;
3723 
3724  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3725  s->pict_type == AV_PICTURE_TYPE_I &&
3726  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3727  s->qscale= 3; //reduce clipping problems
3728 
3729  if (s->out_format == FMT_MJPEG) {
3730  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3731  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3732 
3733  if (s->avctx->intra_matrix) {
3734  chroma_matrix =
3735  luma_matrix = s->avctx->intra_matrix;
3736  }
3737  if (s->avctx->chroma_intra_matrix)
3738  chroma_matrix = s->avctx->chroma_intra_matrix;
3739 
3740  /* for mjpeg, we do include qscale in the matrix */
3741  for(i=1;i<64;i++){
3742  int j = s->idsp.idct_permutation[i];
3743 
3744  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3745  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3746  }
3747  s->y_dc_scale_table=
3748  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3749  s->chroma_intra_matrix[0] =
3750  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3751  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3752  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3753  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3754  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3755  s->qscale= 8;
3756 
3757  if (s->codec_id == AV_CODEC_ID_AMV) {
3758  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3759  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3760  for (int i = 1; i < 64; i++) {
3761  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3762 
3763  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3764  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3765  }
3766  s->y_dc_scale_table = y;
3767  s->c_dc_scale_table = c;
3768  s->intra_matrix[0] = 13;
3769  s->chroma_intra_matrix[0] = 14;
3770  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3771  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3772  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3773  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3774  s->qscale = 8;
3775  }
3776  }
3777 
3778  //FIXME var duplication
3779  s->current_picture_ptr->f->key_frame =
3780  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3781  s->current_picture_ptr->f->pict_type =
3782  s->current_picture.f->pict_type = s->pict_type;
3783 
3784  if (s->current_picture.f->key_frame)
3785  s->picture_in_gop_number=0;
3786 
3787  s->mb_x = s->mb_y = 0;
3788  s->last_bits= put_bits_count(&s->pb);
3789  switch(s->out_format) {
3790 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3791  case FMT_MJPEG:
3793  break;
3794 #endif
3795  case FMT_SPEEDHQ:
3796  if (CONFIG_SPEEDHQ_ENCODER)
3798  break;
3799  case FMT_H261:
3800  if (CONFIG_H261_ENCODER)
3802  break;
3803  case FMT_H263:
3804  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3806  else if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
3808  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3810  if (ret < 0)
3811  return ret;
3812  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3814  if (ret < 0)
3815  return ret;
3816  }
3817  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3819  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3821  else if (CONFIG_H263_ENCODER)
3823  break;
3824  case FMT_MPEG1:
3825  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3827  break;
3828  default:
3829  av_assert0(0);
3830  }
3831  bits= put_bits_count(&s->pb);
3832  s->header_bits= bits - s->last_bits;
3833 
3834  for(i=1; i<context_count; i++){
3835  update_duplicate_context_after_me(s->thread_context[i], s);
3836  }
3837  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3838  for(i=1; i<context_count; i++){
3839  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3840  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3841  merge_context_after_encode(s, s->thread_context[i]);
3842  }
3843  emms_c();
3844  return 0;
3845 }
3846 
3847 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3848  const int intra= s->mb_intra;
3849  int i;
3850 
3851  s->dct_count[intra]++;
3852 
3853  for(i=0; i<64; i++){
3854  int level= block[i];
3855 
3856  if(level){
3857  if(level>0){
3858  s->dct_error_sum[intra][i] += level;
3859  level -= s->dct_offset[intra][i];
3860  if(level<0) level=0;
3861  }else{
3862  s->dct_error_sum[intra][i] -= level;
3863  level += s->dct_offset[intra][i];
3864  if(level>0) level=0;
3865  }
3866  block[i]= level;
3867  }
3868  }
3869 }
3870 
3872  int16_t *block, int n,
3873  int qscale, int *overflow){
3874  const int *qmat;
3875  const uint16_t *matrix;
3876  const uint8_t *scantable;
3877  const uint8_t *perm_scantable;
3878  int max=0;
3879  unsigned int threshold1, threshold2;
3880  int bias=0;
3881  int run_tab[65];
3882  int level_tab[65];
3883  int score_tab[65];
3884  int survivor[65];
3885  int survivor_count;
3886  int last_run=0;
3887  int last_level=0;
3888  int last_score= 0;
3889  int last_i;
3890  int coeff[2][64];
3891  int coeff_count[64];
3892  int qmul, qadd, start_i, last_non_zero, i, dc;
3893  const int esc_length= s->ac_esc_length;
3894  uint8_t * length;
3895  uint8_t * last_length;
3896  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3897  int mpeg2_qscale;
3898 
3899  s->fdsp.fdct(block);
3900 
3901  if(s->dct_error_sum)
3902  s->denoise_dct(s, block);
3903  qmul= qscale*16;
3904  qadd= ((qscale-1)|1)*8;
3905 
3906  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3907  else mpeg2_qscale = qscale << 1;
3908 
3909  if (s->mb_intra) {
3910  int q;
3911  scantable= s->intra_scantable.scantable;
3912  perm_scantable= s->intra_scantable.permutated;
3913  if (!s->h263_aic) {
3914  if (n < 4)
3915  q = s->y_dc_scale;
3916  else
3917  q = s->c_dc_scale;
3918  q = q << 3;
3919  } else{
3920  /* For AIC we skip quant/dequant of INTRADC */
3921  q = 1 << 3;
3922  qadd=0;
3923  }
3924 
3925  /* note: block[0] is assumed to be positive */
3926  block[0] = (block[0] + (q >> 1)) / q;
3927  start_i = 1;
3928  last_non_zero = 0;
3929  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3930  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3931  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3932  bias= 1<<(QMAT_SHIFT-1);
3933 
3934  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3935  length = s->intra_chroma_ac_vlc_length;
3936  last_length= s->intra_chroma_ac_vlc_last_length;
3937  } else {
3938  length = s->intra_ac_vlc_length;
3939  last_length= s->intra_ac_vlc_last_length;
3940  }
3941  } else {
3942  scantable= s->inter_scantable.scantable;
3943  perm_scantable= s->inter_scantable.permutated;
3944  start_i = 0;
3945  last_non_zero = -1;
3946  qmat = s->q_inter_matrix[qscale];
3947  matrix = s->inter_matrix;
3948  length = s->inter_ac_vlc_length;
3949  last_length= s->inter_ac_vlc_last_length;
3950  }
3951  last_i= start_i;
3952 
3953  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3954  threshold2= (threshold1<<1);
3955 
3956  for(i=63; i>=start_i; i--) {
3957  const int j = scantable[i];
3958  int level = block[j] * qmat[j];
3959 
3960  if(((unsigned)(level+threshold1))>threshold2){
3961  last_non_zero = i;
3962  break;
3963  }
3964  }
3965 
3966  for(i=start_i; i<=last_non_zero; i++) {
3967  const int j = scantable[i];
3968  int level = block[j] * qmat[j];
3969 
3970 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3971 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3972  if(((unsigned)(level+threshold1))>threshold2){
3973  if(level>0){
3974  level= (bias + level)>>QMAT_SHIFT;
3975  coeff[0][i]= level;
3976  coeff[1][i]= level-1;
3977 // coeff[2][k]= level-2;
3978  }else{
3979  level= (bias - level)>>QMAT_SHIFT;
3980  coeff[0][i]= -level;
3981  coeff[1][i]= -level+1;
3982 // coeff[2][k]= -level+2;
3983  }
3984  coeff_count[i]= FFMIN(level, 2);
3985  av_assert2(coeff_count[i]);
3986  max |=level;
3987  }else{
3988  coeff[0][i]= (level>>31)|1;
3989  coeff_count[i]= 1;
3990  }
3991  }
3992 
3993  *overflow= s->max_qcoeff < max; //overflow might have happened
3994 
3995  if(last_non_zero < start_i){
3996  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3997  return last_non_zero;
3998  }
3999 
4000  score_tab[start_i]= 0;
4001  survivor[0]= start_i;
4002  survivor_count= 1;
4003 
4004  for(i=start_i; i<=last_non_zero; i++){
4005  int level_index, j, zero_distortion;
4006  int dct_coeff= FFABS(block[ scantable[i] ]);
4007  int best_score=256*256*256*120;
4008 
4009  if (s->fdsp.fdct == ff_fdct_ifast)
4010  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4011  zero_distortion= dct_coeff*dct_coeff;
4012 
4013  for(level_index=0; level_index < coeff_count[i]; level_index++){
4014  int distortion;
4015  int level= coeff[level_index][i];
4016  const int alevel= FFABS(level);
4017  int unquant_coeff;
4018 
4019  av_assert2(level);
4020 
4021  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4022  unquant_coeff= alevel*qmul + qadd;
4023  } else if(s->out_format == FMT_MJPEG) {
4024  j = s->idsp.idct_permutation[scantable[i]];
4025  unquant_coeff = alevel * matrix[j] * 8;
4026  }else{ // MPEG-1
4027  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4028  if(s->mb_intra){
4029  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4030  unquant_coeff = (unquant_coeff - 1) | 1;
4031  }else{
4032  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4033  unquant_coeff = (unquant_coeff - 1) | 1;
4034  }
4035  unquant_coeff<<= 3;
4036  }
4037 
4038  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4039  level+=64;
4040  if((level&(~127)) == 0){
4041  for(j=survivor_count-1; j>=0; j--){
4042  int run= i - survivor[j];
4043  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4044  score += score_tab[i-run];
4045 
4046  if(score < best_score){
4047  best_score= score;
4048  run_tab[i+1]= run;
4049  level_tab[i+1]= level-64;
4050  }
4051  }
4052 
4053  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4054  for(j=survivor_count-1; j>=0; j--){
4055  int run= i - survivor[j];
4056  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4057  score += score_tab[i-run];
4058  if(score < last_score){
4059  last_score= score;
4060  last_run= run;
4061  last_level= level-64;
4062  last_i= i+1;
4063  }
4064  }
4065  }
4066  }else{
4067  distortion += esc_length*lambda;
4068  for(j=survivor_count-1; j>=0; j--){
4069  int run= i - survivor[j];
4070  int score= distortion + score_tab[i-run];
4071 
4072  if(score < best_score){
4073  best_score= score;
4074  run_tab[i+1]= run;
4075  level_tab[i+1]= level-64;
4076  }
4077  }
4078 
4079  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4080  for(j=survivor_count-1; j>=0; j--){
4081  int run= i - survivor[j];
4082  int score= distortion + score_tab[i-run];
4083  if(score < last_score){
4084  last_score= score;
4085  last_run= run;
4086  last_level= level-64;
4087  last_i= i+1;
4088  }
4089  }
4090  }
4091  }
4092  }
4093 
4094  score_tab[i+1]= best_score;
4095 
4096  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4097  if(last_non_zero <= 27){
4098  for(; survivor_count; survivor_count--){
4099  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4100  break;
4101  }
4102  }else{
4103  for(; survivor_count; survivor_count--){
4104  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4105  break;
4106  }
4107  }
4108 
4109  survivor[ survivor_count++ ]= i+1;
4110  }
4111 
4112  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4113  last_score= 256*256*256*120;
4114  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4115  int score= score_tab[i];
4116  if (i)
4117  score += lambda * 2; // FIXME more exact?
4118 
4119  if(score < last_score){
4120  last_score= score;
4121  last_i= i;
4122  last_level= level_tab[i];
4123  last_run= run_tab[i];
4124  }
4125  }
4126  }
4127 
4128  s->coded_score[n] = last_score;
4129 
4130  dc= FFABS(block[0]);
4131  last_non_zero= last_i - 1;
4132  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4133 
4134  if(last_non_zero < start_i)
4135  return last_non_zero;
4136 
4137  if(last_non_zero == 0 && start_i == 0){
4138  int best_level= 0;
4139  int best_score= dc * dc;
4140 
4141  for(i=0; i<coeff_count[0]; i++){
4142  int level= coeff[i][0];
4143  int alevel= FFABS(level);
4144  int unquant_coeff, score, distortion;
4145 
4146  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4147  unquant_coeff= (alevel*qmul + qadd)>>3;
4148  } else{ // MPEG-1
4149  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4150  unquant_coeff = (unquant_coeff - 1) | 1;
4151  }
4152  unquant_coeff = (unquant_coeff + 4) >> 3;
4153  unquant_coeff<<= 3 + 3;
4154 
4155  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4156  level+=64;
4157  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4158  else score= distortion + esc_length*lambda;
4159 
4160  if(score < best_score){
4161  best_score= score;
4162  best_level= level - 64;
4163  }
4164  }
4165  block[0]= best_level;
4166  s->coded_score[n] = best_score - dc*dc;
4167  if(best_level == 0) return -1;
4168  else return last_non_zero;
4169  }
4170 
4171  i= last_i;
4172  av_assert2(last_level);
4173 
4174  block[ perm_scantable[last_non_zero] ]= last_level;
4175  i -= last_run + 1;
4176 
4177  for(; i>start_i; i -= run_tab[i] + 1){
4178  block[ perm_scantable[i-1] ]= level_tab[i];
4179  }
4180 
4181  return last_non_zero;
4182 }
4183 
/* 8x8 DCT basis functions, stored in IDCT-permuted order; filled lazily by
 * build_basis() the first time dct_quantize_refine() runs. */
static int16_t basis[64][64];
4185 
4186 static void build_basis(uint8_t *perm){
4187  int i, j, x, y;
4188  emms_c();
4189  for(i=0; i<8; i++){
4190  for(j=0; j<8; j++){
4191  for(y=0; y<8; y++){
4192  for(x=0; x<8; x++){
4193  double s= 0.25*(1<<BASIS_SHIFT);
4194  int index= 8*i + j;
4195  int perm_index= perm[index];
4196  if(i==0) s*= sqrt(0.5);
4197  if(j==0) s*= sqrt(0.5);
4198  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4199  }
4200  }
4201  }
4202  }
4203 }
4204 
/**
 * Iteratively refine an already-quantized 8x8 block (noise shaping):
 * greedily change single coefficients by +-1 whenever that lowers the
 * combined cost of weighted reconstruction error (via try_8x8basis) and
 * estimated VLC bit length, until no single change improves the score.
 *
 * @param s      encoder context (provides scantables, VLC length tables,
 *               DSP callbacks and lambda)
 * @param block  quantized coefficients, updated in place
 * @param weight perceptual weights per coefficient; remapped in place to
 *               the 16..63 range below
 * @param orig   original (unquantized) pixel-domain reference samples
 * @param n      block index (0..3 luma, >=4 chroma)
 * @param qscale quantizer scale
 * @return index of the last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                 // weighted residual, updated as coefficients change
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                 // zero-run lengths preceding each nonzero coefficient
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;                // VLC bit-length table for non-last coefficients
    uint8_t * last_length;           // VLC bit-length table for the last coefficient
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* the basis table is built lazily on first use */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] starts as (rounded DC) minus the original samples, in
     * RECON_SHIFT fixed point; each dequantized coefficient's basis
     * contribution is added below so rem[] tracks the reconstruction error */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
    }

    /* remap perceptual weights into 16..63 and derive lambda scaled to
     * match the squared-weight normalization used by try_8x8basis() */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the initial run-length table and accumulate each dequantized
     * coefficient's basis function into rem[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* main refinement loop: each pass finds the single +-1 coefficient
     * change with the best score; stop when no change improves it */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted error: its sign tells whether raising or
             * lowering a coefficient can reduce the error at all */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* intra: also consider nudging the DC coefficient */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* at low noise-shaping levels, don't look past the position
             * immediately after the last nonzero coefficient */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* level change only: bit-cost delta of recoding the
                         * same run with the new level */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         -length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         -last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* a zero coefficient becomes +-1: a run is split */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* skip if the gradient says this sign cannot help */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* the new coefficient becomes the last one; the
                             * previous last is recoded as a non-last code */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* a +-1 coefficient becomes zero: two runs merge */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        /* removing the last coefficient: the previous one
                         * becomes last */
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* add the distortion delta of applying this change */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change and update all derived state */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                /* the change may have zeroed the last coefficient */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            /* rebuild the run-length table from scratch */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4520 
4521 /**
4522  * Permute an 8x8 block according to permutation.
4523  * @param block the block which will be permuted according to
4524  * the given permutation vector
4525  * @param permutation the permutation vector
4526  * @param last the last non zero coefficient in scantable order, used to
4527  * speed the permutation up
4528  * @param scantable the used scantable, this is only used to speed the
4529  * permutation up, the block is not (inverse) permutated
4530  * to scantable order!
4531  */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t scratch[64];

    /* nothing to move when at most the DC coefficient is set */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* lift the coefficients covered by the scan out of the block,
     * clearing their old slots so stale values cannot survive */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* drop each saved coefficient back at its permuted position */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = scratch[pos];
    }
}
4556 
                          int16_t *block, int n,
                          int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;                        // OR of all quantized magnitudes, for overflow detection
    unsigned int threshold1, threshold2;

    /* forward DCT of the block's samples/residual */
    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* first pass, from the highest frequency down: zero trailing
     * coefficients that quantize to 0 and find the last nonzero one */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        /* unsigned trick: tests level <= -threshold1-1 || level > threshold1 */
        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* second pass: quantize everything up to the last nonzero coefficient */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1287
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:342
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:83
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:149
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:198
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:287
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:262
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:101
level
uint8_t level
Definition: svq3.c:204
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:387
av_clip
#define av_clip
Definition: common.h:95
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3557
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:139
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:521
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:197
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
r
const char * r
Definition: vf_curves.c:126
acc
int acc
Definition: yuv2rgb.c:554
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:372
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:226
mem_internal.h
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:242
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1261
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1652
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:137
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:96
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:602
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2501
matrix
Definition: vc1dsp.c:42
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:55
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:216
mpegvideoenc.h
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2650
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4532
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4184
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:910
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:170
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:163
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2703
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:744
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1666
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:260
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:388
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:328
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
INTERLACED_DCT
#define INTERLACED_DCT(s)
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:437
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:107
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:203
w
uint8_t w
Definition: llviddspenc.c:38
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:374
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:1087
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:369
AVOption
AVOption.
Definition: opt.h:251
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:109
data
const char data[16]
Definition: mxf.c:146
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:203
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:215
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:114
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:818
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:848
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:373
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:392
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:39
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:471
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:305
Picture
Picture.
Definition: mpegpicture.h:46
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:100
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2684
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2086
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:141
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:898
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:98
wmv2enc.h
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1028
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1225
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:330
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:539
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:220
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
encode_picture
static int encode_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:3571
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:581
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MPEGVIDEO_MAX_PLANES
#define MPEGVIDEO_MAX_PLANES
Definition: mpegpicture.h:32
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:272
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:259
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:108
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:108
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:41
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:830
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1747
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:65
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3519
FDCTDSPContext
Definition: fdctdsp.h:26
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:202
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:196
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:877
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:435
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3485
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:190
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:49
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:454
skip_check
static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
Definition: mpegvideo_enc.c:1246
fail
#define fail()
Definition: checkasm.h:134
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:139
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:105
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1003
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
perm
perm
Definition: f_perms.c:75
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:484
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2916
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:96
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:313
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:653
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:42
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:53
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:297
ff_sqrt
#define ff_sqrt
Definition: mathops.h:218
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:266
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:99
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:440
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:291
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:309
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2814
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:87
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:866
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:340
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:784
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1687
RateControlContext
rate control context.
Definition: ratecontrol.h:63
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:184
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:236
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2791
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:93
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1033
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4186
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:721
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:153
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:40
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1050
s
#define s(width, name)
Definition: cbs_vp9.c:256
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:451
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:312
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:257
g
const char * g
Definition: vf_curves.c:127
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1491
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:129
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1487
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:220
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1254
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1436
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:126
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:436
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:765
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2728
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1239
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:453
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:391
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:182
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:37
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:484
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:53
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:594
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2771
run
uint8_t run
Definition: svq3.c:203
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:284
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:326
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:225
bias
static int bias(int x, int c)
Definition: vqcdec.c:113
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
me
#define me
Definition: vf_colormatrix.c:104
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:34
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:168
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:461
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:476
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:48
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:249
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:881
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:278
mathops.h
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:326
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3484
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:820
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:680
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:980
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:899
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:115
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1289
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1324
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:254
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:120
AVOnce
#define AVOnce
Definition: thread.h:181
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1059
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1562
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:272
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:263
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:751
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:276
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3847
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1337
tab
static const uint8_t tab[16]
Definition: rka.c:668
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:548
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1364
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1296
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2110
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:228
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3491
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:286
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:81
Picture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:80
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:301
AVPacket::size
int size
Definition: packet.h:375
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:371
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:620
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:195
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1095
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:1996
shift
static int shift(int a, int b)
Definition: bonk.c:257
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:296
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:595
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1062
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:29
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:198
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:327
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:281
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:95
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:41
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:112
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:94
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:136
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:162
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:293
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:261
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:111
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:265
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:494
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:380
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:62
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:141
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:815
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:53
AVCodec::id
enum AVCodecID id
Definition: codec.h:198
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:192
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:138
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:452
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:219
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:156
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:246
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:456
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:450
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3871
MpegEncContext::encoding_error
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
Definition: mpegvideo.h:252
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1064
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2848
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1308
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:926
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:390
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:131
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:284
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:49
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:105
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:816
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:831
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2511
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:308
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:246
AVCodecContext::height
int height
Definition: avcodec.h:598
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:483
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:671
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:102
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:331
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:147
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1341
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:110
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2578
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:105
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
Picture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:81
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1701
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:201
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:453
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:772
AVCodecContext
main external API structure.
Definition: avcodec.h:426
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:92
Picture::shared
int shared
Definition: mpegpicture.h:78
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:887
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1363
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:343
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:97
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:230
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1218
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1453
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:758
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1629
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:248
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:857
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:101
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:84
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:103
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:901
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:108
desc
const char * desc
Definition: libsvtav1.c:83
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:274
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
msmpeg4enc.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:697
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:538
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:108
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:311
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:532
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1268
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4205
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2539
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:258
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1025
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:900
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2052
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:919
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:53
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:156
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2629
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:344
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:855
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:144
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:40
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4557
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:156
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:35
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:169
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:795
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:341
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
pixblockdsp.h
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:104
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1599
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1014
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:289
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:449
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2751
intmath.h