mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
43 #include "avcodec.h"
44 #include "dct.h"
45 #include "idctdsp.h"
46 #include "mpeg12.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
49 #include "h261.h"
50 #include "h263.h"
51 #include "h263data.h"
52 #include "mjpegenc_common.h"
53 #include "mathops.h"
54 #include "mpegutils.h"
55 #include "mjpegenc.h"
56 #include "speedhqenc.h"
57 #include "msmpeg4.h"
58 #include "pixblockdsp.h"
59 #include "qpeldsp.h"
60 #include "faandct.h"
61 #include "thread.h"
62 #include "aandcttab.h"
63 #include "flv.h"
64 #include "mpeg4video.h"
65 #include "internal.h"
66 #include "bytestream.h"
67 #include "wmv2.h"
68 #include "rv10.h"
69 #include "packet_internal.h"
70 #include <limits.h>
71 #include "sp5x.h"
72 
73 #define QUANT_BIAS_SHIFT 8
74 
75 #define QMAT_SHIFT_MMX 16
76 #define QMAT_SHIFT 21
77 
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
83 
86 
89 #if FF_API_MPEGVIDEO_OPTS
94 #endif
95  { NULL },
96 };
97 
98 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
99  uint16_t (*qmat16)[2][64],
100  const uint16_t *quant_matrix,
101  int bias, int qmin, int qmax, int intra)
102 {
103  FDCTDSPContext *fdsp = &s->fdsp;
104  int qscale;
105  int shift = 0;
106 
107  for (qscale = qmin; qscale <= qmax; qscale++) {
108  int i;
109  int qscale2;
110 
111  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
112  else qscale2 = qscale << 1;
113 
114  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
115 #if CONFIG_FAANDCT
116  fdsp->fdct == ff_faandct ||
 117 #endif /* CONFIG_FAANDCT */
 118  fdsp->fdct == ff_jpeg_fdct_islow_10) {
 119  for (i = 0; i < 64; i++) {
120  const int j = s->idsp.idct_permutation[i];
121  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 122  /* These DCTs are not AAN pre-scaled, so the quantizer here is
 123  * simply (2 << QMAT_SHIFT) / (qscale2 * quant_matrix[i]).
 124  * With QMAT_SHIFT == 21 the result is at most 1 << 22 and always
 125  * fits in an int; the AAN-specific bounds in the comment below
 126  * apply only to the ff_fdct_ifast branch. */
127 
128  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
129  }
130  } else if (fdsp->fdct == ff_fdct_ifast) {
131  for (i = 0; i < 64; i++) {
132  const int j = s->idsp.idct_permutation[i];
133  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
134  /* 16 <= qscale * quant_matrix[i] <= 7905
135  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
136  * 19952 <= x <= 249205026
137  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
138  * 3444240 >= (1 << 36) / (x) >= 275 */
139 
140  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
141  }
142  } else {
143  for (i = 0; i < 64; i++) {
144  const int j = s->idsp.idct_permutation[i];
145  int64_t den = (int64_t) qscale2 * quant_matrix[j];
146  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
147  * Assume x = qscale * quant_matrix[i]
148  * So 16 <= x <= 7905
149  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
150  * so 32768 >= (1 << 19) / (x) >= 67 */
151  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
152  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
153  // (qscale * quant_matrix[i]);
154  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
155 
156  if (qmat16[qscale][0][i] == 0 ||
157  qmat16[qscale][0][i] == 128 * 256)
158  qmat16[qscale][0][i] = 128 * 256 - 1;
159  qmat16[qscale][1][i] =
160  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
161  qmat16[qscale][0][i]);
162  }
163  }
164 
165  for (i = intra; i < 64; i++) {
166  int64_t max = 8191;
167  if (fdsp->fdct == ff_fdct_ifast) {
168  max = (8191LL * ff_aanscales[i]) >> 14;
169  }
170  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
171  shift++;
172  }
173  }
174  }
175  if (shift) {
176  av_log(s->avctx, AV_LOG_INFO,
177  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
178  QMAT_SHIFT - shift);
179  }
180 }
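/* Worked example with illustrative numbers: with the linear quantizer,
 * qscale = 4 gives qscale2 = 8, and for quant_matrix[j] = 16 the divisor is
 * 8 * 16 = 128, so qmat[4][j] = (2 << QMAT_SHIFT) / 128 = 32768; the
 * quantizer later computes roughly (coeff * 32768) >> QMAT_SHIFT. */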
181 
182 static inline void update_qscale(MpegEncContext *s)
183 {
184  if (s->q_scale_type == 1 && 0) {
185  int i;
186  int bestdiff=INT_MAX;
187  int best = 1;
188 
189  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
190  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
191  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
192  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
193  continue;
194  if (diff < bestdiff) {
195  bestdiff = diff;
196  best = i;
197  }
198  }
199  s->qscale = best;
200  } else {
201  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
202  (FF_LAMBDA_SHIFT + 7);
203  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
204  }
205 
 206  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
 207  FF_LAMBDA_SHIFT;
 208 }
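/* The non-linear branch above is disabled by the "&& 0"; in the branch that
 * actually runs, 139 / (1 << (FF_LAMBDA_SHIFT + 7)) is approximately
 * 1 / FF_QP2LAMBDA, so this is qscale = lambda / FF_QP2LAMBDA with rounding,
 * clipped to the configured qmin/qmax range. */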
209 
210 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
211 {
212  int i;
213 
214  if (matrix) {
215  put_bits(pb, 1, 1);
216  for (i = 0; i < 64; i++) {
217  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
218  }
219  } else
220  put_bits(pb, 1, 0);
221 }
222 
223 /**
224  * init s->current_picture.qscale_table from s->lambda_table
225  */
227 {
228  int8_t * const qscale_table = s->current_picture.qscale_table;
229  int i;
230 
231  for (i = 0; i < s->mb_num; i++) {
232  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
233  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
234  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
235  s->avctx->qmax);
236  }
237 }
238 
241 {
242 #define COPY(a) dst->a= src->a
243  COPY(pict_type);
245  COPY(f_code);
246  COPY(b_code);
247  COPY(qscale);
248  COPY(lambda);
249  COPY(lambda2);
252  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
253  COPY(progressive_frame); // FIXME don't set in encode_header
254  COPY(partitioned_frame); // FIXME don't set in encode_header
255 #undef COPY
256 }
257 
258 static void mpv_encode_init_static(void)
259 {
260  for (int i = -16; i < 16; i++)
261  default_fcode_tab[i + MAX_MV] = 1;
262 }
263 
264 /**
265  * Set the given MpegEncContext to defaults for encoding.
266  * the changed fields will not depend upon the prior state of the MpegEncContext.
267  */
269 {
270  static AVOnce init_static_once = AV_ONCE_INIT;
271 
273 
274  ff_thread_once(&init_static_once, mpv_encode_init_static);
275 
276  s->me.mv_penalty = default_mv_penalty;
277  s->fcode_tab = default_fcode_tab;
278 
279  s->input_picture_number = 0;
280  s->picture_in_gop_number = 0;
281 }
282 
284 {
285  if (ARCH_X86)
287 
288  if (CONFIG_H263_ENCODER)
289  ff_h263dsp_init(&s->h263dsp);
290  if (!s->dct_quantize)
291  s->dct_quantize = ff_dct_quantize_c;
292  if (!s->denoise_dct)
293  s->denoise_dct = denoise_dct_c;
294  s->fast_dct_quantize = s->dct_quantize;
295  if (s->avctx->trellis)
296  s->dct_quantize = dct_quantize_trellis_c;
297 
298  return 0;
299 }
300 
301 /* init video encoder */
303 {
305  AVCPBProperties *cpb_props;
306  int i, ret;
307 
309 
310  switch (avctx->pix_fmt) {
311  case AV_PIX_FMT_YUVJ444P:
312  case AV_PIX_FMT_YUV444P:
313  s->chroma_format = CHROMA_444;
314  break;
315  case AV_PIX_FMT_YUVJ422P:
316  case AV_PIX_FMT_YUV422P:
317  s->chroma_format = CHROMA_422;
318  break;
319  case AV_PIX_FMT_YUVJ420P:
320  case AV_PIX_FMT_YUV420P:
321  default:
322  s->chroma_format = CHROMA_420;
323  break;
324  }
325 
327 
328 #if FF_API_PRIVATE_OPT
330  if (avctx->rtp_payload_size)
331  s->rtp_payload_size = avctx->rtp_payload_size;
333  s->me_penalty_compensation = avctx->me_penalty_compensation;
334  if (avctx->pre_me)
335  s->me_pre = avctx->pre_me;
337 #endif
338 
339  s->bit_rate = avctx->bit_rate;
340  s->width = avctx->width;
341  s->height = avctx->height;
342  if (avctx->gop_size > 600 &&
345  "keyframe interval too large!, reducing it from %d to %d\n",
346  avctx->gop_size, 600);
347  avctx->gop_size = 600;
348  }
349  s->gop_size = avctx->gop_size;
350  s->avctx = avctx;
352  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
353  "is %d.\n", MAX_B_FRAMES);
355  }
356  s->max_b_frames = avctx->max_b_frames;
357  s->codec_id = avctx->codec->id;
358  s->strict_std_compliance = avctx->strict_std_compliance;
359  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
360  s->rtp_mode = !!s->rtp_payload_size;
361  s->intra_dc_precision = avctx->intra_dc_precision;
362 
363  // workaround some differences between how applications specify dc precision
364  if (s->intra_dc_precision < 0) {
365  s->intra_dc_precision += 8;
366  } else if (s->intra_dc_precision >= 8)
367  s->intra_dc_precision -= 8;
368 
369  if (s->intra_dc_precision < 0) {
371  "intra dc precision must be positive, note some applications use"
372  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
373  return AVERROR(EINVAL);
374  }
375 
377  s->huffman = 0;
378 
379  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
380  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
381  return AVERROR(EINVAL);
382  }
383  s->user_specified_pts = AV_NOPTS_VALUE;
384 
385  if (s->gop_size <= 1) {
386  s->intra_only = 1;
387  s->gop_size = 12;
388  } else {
389  s->intra_only = 0;
390  }
391 
392  /* Fixed QSCALE */
393  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
394 
395  s->adaptive_quant = (avctx->lumi_masking ||
396  avctx->dark_masking ||
399  avctx->p_masking ||
400  s->border_masking ||
401  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
402  !s->fixed_qscale;
403 
404  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
405 
407  switch(avctx->codec_id) {
410  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
411  break;
412  case AV_CODEC_ID_MPEG4:
416  if (avctx->rc_max_rate >= 15000000) {
417  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
418  } else if(avctx->rc_max_rate >= 2000000) {
419  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
420  } else if(avctx->rc_max_rate >= 384000) {
421  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
422  } else
423  avctx->rc_buffer_size = 40;
424  avctx->rc_buffer_size *= 16384;
425  break;
426  }
427  if (avctx->rc_buffer_size) {
428  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
429  }
430  }
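 /* Illustrative example: for the MPEG-1/2 case with rc_max_rate = 15 Mbit/s
  * the formula above yields 112 * 16384 = 1835008 bits, which the log
  * message reports as 1835008 / 8192 = 224 kbyte. */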
431 
432  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
433  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
434  return AVERROR(EINVAL);
435  }
436 
439  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
440  }
441 
443  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
444  return AVERROR(EINVAL);
445  }
446 
448  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
449  return AVERROR(EINVAL);
450  }
451 
452  if (avctx->rc_max_rate &&
456  "impossible bitrate constraints, this will fail\n");
457  }
458 
459  if (avctx->rc_buffer_size &&
460  avctx->bit_rate * (int64_t)avctx->time_base.num >
461  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
462  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
463  return AVERROR(EINVAL);
464  }
465 
466  if (!s->fixed_qscale &&
470  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
472  }
473 
474  if (avctx->rc_max_rate &&
476  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
477  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
478  90000LL * (avctx->rc_buffer_size - 1) >
479  avctx->rc_max_rate * 0xFFFFLL) {
481  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
482  "specified vbv buffer is too large for the given bitrate!\n");
483  }
484 
485  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
486  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
487  s->codec_id != AV_CODEC_ID_FLV1) {
488  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
494  "OBMC is only supported with simple mb decision\n");
495  return AVERROR(EINVAL);
496  }
497 
498  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
499  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
500  return AVERROR(EINVAL);
501  }
502 
503  if (s->max_b_frames &&
504  s->codec_id != AV_CODEC_ID_MPEG4 &&
505  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
506  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
507  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
508  return AVERROR(EINVAL);
509  }
510  if (s->max_b_frames < 0) {
512  "max b frames must be 0 or positive for mpegvideo based encoders\n");
513  return AVERROR(EINVAL);
514  }
515 
516  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
517  s->codec_id == AV_CODEC_ID_H263 ||
518  s->codec_id == AV_CODEC_ID_H263P) &&
519  (avctx->sample_aspect_ratio.num > 255 ||
520  avctx->sample_aspect_ratio.den > 255)) {
522  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
526  }
527 
528  if ((s->codec_id == AV_CODEC_ID_H263 ||
529  s->codec_id == AV_CODEC_ID_H263P) &&
530  (avctx->width > 2048 ||
531  avctx->height > 1152 )) {
532  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
533  return AVERROR(EINVAL);
534  }
535  if ((s->codec_id == AV_CODEC_ID_H263 ||
536  s->codec_id == AV_CODEC_ID_H263P ||
537  s->codec_id == AV_CODEC_ID_RV20) &&
538  ((avctx->width &3) ||
539  (avctx->height&3) )) {
540  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
541  return AVERROR(EINVAL);
542  }
543 
544  if (s->codec_id == AV_CODEC_ID_RV10 &&
545  (avctx->width &15 ||
546  avctx->height&15 )) {
547  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
548  return AVERROR(EINVAL);
549  }
550 
551  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
552  s->codec_id == AV_CODEC_ID_WMV2) &&
553  avctx->width & 1) {
554  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
555  return AVERROR(EINVAL);
556  }
557 
559  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
560  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
561  return AVERROR(EINVAL);
562  }
563 
564 #if FF_API_PRIVATE_OPT
566  if (avctx->mpeg_quant)
567  s->mpeg_quant = 1;
569 
570  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
571  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
573  "mpeg2 style quantization not supported by codec\n");
574  return AVERROR(EINVAL);
575  }
576 #endif
577 
578  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
579  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
580  return AVERROR(EINVAL);
581  }
582 
583  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
585  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
586  return AVERROR(EINVAL);
587  }
588 
589  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
590  (s->codec_id == AV_CODEC_ID_AMV ||
591  s->codec_id == AV_CODEC_ID_MJPEG)) {
592  // Used to produce garbage with MJPEG.
594  "QP RD is no longer compatible with MJPEG or AMV\n");
595  return AVERROR(EINVAL);
596  }
597 
598 #if FF_API_PRIVATE_OPT
601  s->scenechange_threshold = avctx->scenechange_threshold;
603 #endif
604 
605  if (s->scenechange_threshold < 1000000000 &&
608  "closed gop with scene change detection are not supported yet, "
609  "set threshold to 1000000000\n");
610  return AVERROR_PATCHWELCOME;
611  }
612 
614  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
615  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
617  "low delay forcing is only available for mpeg2, "
618  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
619  return AVERROR(EINVAL);
620  }
621  if (s->max_b_frames != 0) {
623  "B-frames cannot be used with low delay\n");
624  return AVERROR(EINVAL);
625  }
626  }
627 
628  if (s->q_scale_type == 1) {
629  if (avctx->qmax > 28) {
631  "non linear quant only supports qmax <= 28 currently\n");
632  return AVERROR_PATCHWELCOME;
633  }
634  }
635 
636  if (avctx->slices > 1 &&
638  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
639  return AVERROR(EINVAL);
640  }
641 
642  if (avctx->thread_count > 1 &&
643  s->codec_id != AV_CODEC_ID_MPEG4 &&
644  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
645  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
646  s->codec_id != AV_CODEC_ID_MJPEG &&
647  (s->codec_id != AV_CODEC_ID_H263P)) {
649  "multi threaded encoding not supported by codec\n");
650  return AVERROR_PATCHWELCOME;
651  }
652 
653  if (avctx->thread_count < 1) {
655  "automatic thread number detection not supported by codec, "
656  "patch welcome\n");
657  return AVERROR_PATCHWELCOME;
658  }
659 
660 #if FF_API_PRIVATE_OPT
662  if (avctx->b_frame_strategy)
663  s->b_frame_strategy = avctx->b_frame_strategy;
664  if (avctx->b_sensitivity != 40)
665  s->b_sensitivity = avctx->b_sensitivity;
667 #endif
668 
669  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
671  "notice: b_frame_strategy only affects the first pass\n");
672  s->b_frame_strategy = 0;
673  }
674 
676  if (i > 1) {
677  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
678  avctx->time_base.den /= i;
679  avctx->time_base.num /= i;
680  //return -1;
681  }
682 
683  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
684  // (a + x * 3 / 8) / x
685  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
686  s->inter_quant_bias = 0;
687  } else {
688  s->intra_quant_bias = 0;
689  // (a - x / 4) / x
690  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
691  }
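 /* QUANT_BIAS_SHIFT == 8, so one quantizer step corresponds to 256:
  * 3 << (QUANT_BIAS_SHIFT - 3) is +3/8 of a step (rounds intra coefficients
  * up), -(1 << (QUANT_BIAS_SHIFT - 2)) is -1/4 of a step (a dead zone for
  * inter coefficients), matching the two comments above. */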
692 
693  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
694  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
695  return AVERROR(EINVAL);
696  }
697 
698  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
699 
700  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
701  avctx->time_base.den > (1 << 16) - 1) {
703  "timebase %d/%d not supported by MPEG 4 standard, "
704  "the maximum admitted value for the timebase denominator "
705  "is %d\n", avctx->time_base.num, avctx->time_base.den,
706  (1 << 16) - 1);
707  return AVERROR(EINVAL);
708  }
709  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
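 /* time_increment_bits is the number of bits needed to code values up to
  * time_base.den - 1, e.g. den = 30 gives av_log2(29) + 1 = 5 bits. */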
710 
711  switch (avctx->codec->id) {
713  s->out_format = FMT_MPEG1;
714  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
715  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
716  break;
718  s->out_format = FMT_MPEG1;
719  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
720  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
721  s->rtp_mode = 1;
722  break;
723 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
724  case AV_CODEC_ID_MJPEG:
725  case AV_CODEC_ID_AMV:
726  s->out_format = FMT_MJPEG;
727  s->intra_only = 1; /* force intra only for jpeg */
728  if ((ret = ff_mjpeg_encode_init(s)) < 0)
729  return ret;
730  avctx->delay = 0;
731  s->low_delay = 1;
732  break;
733 #endif
734  case AV_CODEC_ID_SPEEDHQ:
735  s->out_format = FMT_SPEEDHQ;
736  s->intra_only = 1; /* force intra only for SHQ */
737  if (!CONFIG_SPEEDHQ_ENCODER)
739  if ((ret = ff_speedhq_encode_init(s)) < 0)
740  return ret;
741  avctx->delay = 0;
742  s->low_delay = 1;
743  break;
744  case AV_CODEC_ID_H261:
745  if (!CONFIG_H261_ENCODER)
747  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
749  "The specified picture size of %dx%d is not valid for the "
750  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
751  s->width, s->height);
752  return AVERROR(EINVAL);
753  }
754  s->out_format = FMT_H261;
755  avctx->delay = 0;
756  s->low_delay = 1;
757  s->rtp_mode = 0; /* Sliced encoding not supported */
758  break;
759  case AV_CODEC_ID_H263:
760  if (!CONFIG_H263_ENCODER)
763  s->width, s->height) == 8) {
765  "The specified picture size of %dx%d is not valid for "
766  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
767  "352x288, 704x576, and 1408x1152. "
768  "Try H.263+.\n", s->width, s->height);
769  return AVERROR(EINVAL);
770  }
771  s->out_format = FMT_H263;
772  avctx->delay = 0;
773  s->low_delay = 1;
774  break;
775  case AV_CODEC_ID_H263P:
776  s->out_format = FMT_H263;
777  s->h263_plus = 1;
778  /* Fx */
779  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
780  s->modified_quant = s->h263_aic;
781  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
782  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
783 
784  /* /Fx */
785  /* These are just to be sure */
786  avctx->delay = 0;
787  s->low_delay = 1;
788  break;
789  case AV_CODEC_ID_FLV1:
790  s->out_format = FMT_H263;
791  s->h263_flv = 2; /* format = 1; 11-bit codes */
792  s->unrestricted_mv = 1;
793  s->rtp_mode = 0; /* don't allow GOB */
794  avctx->delay = 0;
795  s->low_delay = 1;
796  break;
797  case AV_CODEC_ID_RV10:
798  s->out_format = FMT_H263;
799  avctx->delay = 0;
800  s->low_delay = 1;
801  break;
802  case AV_CODEC_ID_RV20:
803  s->out_format = FMT_H263;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  s->modified_quant = 1;
807  s->h263_aic = 1;
808  s->h263_plus = 1;
809  s->loop_filter = 1;
810  s->unrestricted_mv = 0;
811  break;
812  case AV_CODEC_ID_MPEG4:
813  s->out_format = FMT_H263;
814  s->h263_pred = 1;
815  s->unrestricted_mv = 1;
816  s->low_delay = s->max_b_frames ? 0 : 1;
817  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
818  break;
820  s->out_format = FMT_H263;
821  s->h263_pred = 1;
822  s->unrestricted_mv = 1;
823  s->msmpeg4_version = 2;
824  avctx->delay = 0;
825  s->low_delay = 1;
826  break;
828  s->out_format = FMT_H263;
829  s->h263_pred = 1;
830  s->unrestricted_mv = 1;
831  s->msmpeg4_version = 3;
832  s->flipflop_rounding = 1;
833  avctx->delay = 0;
834  s->low_delay = 1;
835  break;
836  case AV_CODEC_ID_WMV1:
837  s->out_format = FMT_H263;
838  s->h263_pred = 1;
839  s->unrestricted_mv = 1;
840  s->msmpeg4_version = 4;
841  s->flipflop_rounding = 1;
842  avctx->delay = 0;
843  s->low_delay = 1;
844  break;
845  case AV_CODEC_ID_WMV2:
846  s->out_format = FMT_H263;
847  s->h263_pred = 1;
848  s->unrestricted_mv = 1;
849  s->msmpeg4_version = 5;
850  s->flipflop_rounding = 1;
851  avctx->delay = 0;
852  s->low_delay = 1;
853  break;
854  default:
855  return AVERROR(EINVAL);
856  }
857 
858 #if FF_API_PRIVATE_OPT
860  if (avctx->noise_reduction)
861  s->noise_reduction = avctx->noise_reduction;
863 #endif
864 
865  avctx->has_b_frames = !s->low_delay;
866 
867  s->encoding = 1;
868 
869  s->progressive_frame =
870  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
872  s->alternate_scan);
873 
874  /* init */
876  if ((ret = ff_mpv_common_init(s)) < 0)
877  return ret;
878 
879  ff_fdctdsp_init(&s->fdsp, avctx);
880  ff_me_cmp_init(&s->mecc, avctx);
881  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
882  ff_pixblockdsp_init(&s->pdsp, avctx);
883  ff_qpeldsp_init(&s->qdsp);
884 
885  if (s->msmpeg4_version) {
886  int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
887  if (!(s->ac_stats = av_mallocz(ac_stats_size)))
888  return AVERROR(ENOMEM);
889  }
890 
891  if (!(avctx->stats_out = av_mallocz(256)) ||
892  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
893  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
894  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
895  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
896  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
897  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
898  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
899  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
900  return AVERROR(ENOMEM);
901 
902  if (s->noise_reduction) {
903  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
904  return AVERROR(ENOMEM);
905  }
906 
908 
909  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
910  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
911 
912  if (s->slice_context_count > 1) {
913  s->rtp_mode = 1;
914 
916  s->h263_slice_structured = 1;
917  }
918 
919  s->quant_precision = 5;
920 
921 #if FF_API_PRIVATE_OPT
924  s->frame_skip_threshold = avctx->frame_skip_threshold;
926  s->frame_skip_factor = avctx->frame_skip_factor;
927  if (avctx->frame_skip_exp)
928  s->frame_skip_exp = avctx->frame_skip_exp;
930  s->frame_skip_cmp = avctx->frame_skip_cmp;
932 #endif
933 
934  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
935  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
936 
937  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
939  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
941  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
943  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
944  && s->out_format == FMT_MPEG1)
946 
947  /* init q matrix */
948  for (i = 0; i < 64; i++) {
949  int j = s->idsp.idct_permutation[i];
950  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
951  s->mpeg_quant) {
952  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
953  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
954  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
955  s->intra_matrix[j] =
956  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
957  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
958  s->intra_matrix[j] =
959  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
960  } else {
961  /* MPEG-1/2 */
962  s->chroma_intra_matrix[j] =
963  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
964  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
965  }
966  if (avctx->intra_matrix)
967  s->intra_matrix[j] = avctx->intra_matrix[i];
968  if (avctx->inter_matrix)
969  s->inter_matrix[j] = avctx->inter_matrix[i];
970  }
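 /* The default matrices are given in natural order i but stored at the
  * permuted index j = idct_permutation[i], i.e. in the coefficient order
  * used by the selected (I)DCT implementation. */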
971 
972  /* precompute matrix */
973  /* for mjpeg, we do include qscale in the matrix */
974  if (s->out_format != FMT_MJPEG) {
975  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
976  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
977  31, 1);
978  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
979  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
980  31, 0);
981  }
982 
983  if ((ret = ff_rate_control_init(s)) < 0)
984  return ret;
985 
986 #if FF_API_PRIVATE_OPT
988  if (avctx->brd_scale)
989  s->brd_scale = avctx->brd_scale;
990 
992  s->pred = avctx->prediction_method + 1;
994 #endif
995 
996  if (s->b_frame_strategy == 2) {
997  for (i = 0; i < s->max_b_frames + 2; i++) {
998  s->tmp_frames[i] = av_frame_alloc();
999  if (!s->tmp_frames[i])
1000  return AVERROR(ENOMEM);
1001 
1002  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1003  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1004  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1005 
1006  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1007  if (ret < 0)
1008  return ret;
1009  }
1010  }
1011 
1012  cpb_props = ff_add_cpb_side_data(avctx);
1013  if (!cpb_props)
1014  return AVERROR(ENOMEM);
1015  cpb_props->max_bitrate = avctx->rc_max_rate;
1016  cpb_props->min_bitrate = avctx->rc_min_rate;
1017  cpb_props->avg_bitrate = avctx->bit_rate;
1018  cpb_props->buffer_size = avctx->rc_buffer_size;
1019 
1020  return 0;
1021 }
1022 
1024 {
1026  int i;
1027 
1029 
1031  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
1032  s->out_format == FMT_MJPEG)
1034 
1036 
1037  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1038  av_frame_free(&s->tmp_frames[i]);
1039 
1040  ff_free_picture_tables(&s->new_picture);
1041  ff_mpeg_unref_picture(avctx, &s->new_picture);
1042 
1044  av_freep(&s->ac_stats);
1045 
1046  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1047  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1048  s->q_chroma_intra_matrix= NULL;
1049  s->q_chroma_intra_matrix16= NULL;
1050  av_freep(&s->q_intra_matrix);
1051  av_freep(&s->q_inter_matrix);
1052  av_freep(&s->q_intra_matrix16);
1053  av_freep(&s->q_inter_matrix16);
1054  av_freep(&s->input_picture);
1055  av_freep(&s->reordered_input_picture);
1056  av_freep(&s->dct_offset);
1057 
1058  return 0;
1059 }
1060 
1061 static int get_sae(uint8_t *src, int ref, int stride)
1062 {
1063  int x,y;
1064  int acc = 0;
1065 
1066  for (y = 0; y < 16; y++) {
1067  for (x = 0; x < 16; x++) {
1068  acc += FFABS(src[x + y * stride] - ref);
1069  }
1070  }
1071 
1072  return acc;
1073 }
1074 
1076  uint8_t *ref, int stride)
1077 {
1078  int x, y, w, h;
1079  int acc = 0;
1080 
1081  w = s->width & ~15;
1082  h = s->height & ~15;
1083 
1084  for (y = 0; y < h; y += 16) {
1085  for (x = 0; x < w; x += 16) {
1086  int offset = x + y * stride;
1087  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1088  stride, 16);
1089  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1090  int sae = get_sae(src + offset, mean, stride);
1091 
1092  acc += sae + 500 < sad;
1093  }
1094  }
1095  return acc;
1096 }
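/* get_intra_count() counts 16x16 blocks whose SAD against the reference
 * frame clearly exceeds their own flatness measure (SAE around the block
 * mean); b_frame_strategy == 1 uses it below to score how much consecutive
 * input frames differ. */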
1097 
1098 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1099 {
1100  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1101  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1102  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1103  &s->linesize, &s->uvlinesize);
1104 }
1105 
1106 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1107 {
1108  Picture *pic = NULL;
1109  int64_t pts;
1110  int i, display_picture_number = 0, ret;
1111  int encoding_delay = s->max_b_frames ? s->max_b_frames
1112  : (s->low_delay ? 0 : 1);
1113  int flush_offset = 1;
1114  int direct = 1;
1115 
1116  if (pic_arg) {
1117  pts = pic_arg->pts;
1118  display_picture_number = s->input_picture_number++;
1119 
1120  if (pts != AV_NOPTS_VALUE) {
1121  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1122  int64_t last = s->user_specified_pts;
1123 
1124  if (pts <= last) {
1125  av_log(s->avctx, AV_LOG_ERROR,
1126  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1127  pts, last);
1128  return AVERROR(EINVAL);
1129  }
1130 
1131  if (!s->low_delay && display_picture_number == 1)
1132  s->dts_delta = pts - last;
1133  }
1134  s->user_specified_pts = pts;
1135  } else {
1136  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1137  s->user_specified_pts =
1138  pts = s->user_specified_pts + 1;
1139  av_log(s->avctx, AV_LOG_INFO,
1140  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1141  pts);
1142  } else {
1143  pts = display_picture_number;
1144  }
1145  }
1146 
1147  if (!pic_arg->buf[0] ||
1148  pic_arg->linesize[0] != s->linesize ||
1149  pic_arg->linesize[1] != s->uvlinesize ||
1150  pic_arg->linesize[2] != s->uvlinesize)
1151  direct = 0;
1152  if ((s->width & 15) || (s->height & 15))
1153  direct = 0;
1154  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1155  direct = 0;
1156  if (s->linesize & (STRIDE_ALIGN-1))
1157  direct = 0;
1158 
1159  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1160  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1161 
1162  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1163  if (i < 0)
1164  return i;
1165 
1166  pic = &s->picture[i];
1167  pic->reference = 3;
1168 
1169  if (direct) {
1170  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1171  return ret;
1172  }
1173  ret = alloc_picture(s, pic, direct);
1174  if (ret < 0)
1175  return ret;
1176 
1177  if (!direct) {
1178  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1179  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1180  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1181  // empty
1182  } else {
1183  int h_chroma_shift, v_chroma_shift;
1184  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1185  &h_chroma_shift,
1186  &v_chroma_shift);
1187 
1188  for (i = 0; i < 3; i++) {
1189  int src_stride = pic_arg->linesize[i];
1190  int dst_stride = i ? s->uvlinesize : s->linesize;
1191  int h_shift = i ? h_chroma_shift : 0;
1192  int v_shift = i ? v_chroma_shift : 0;
1193  int w = s->width >> h_shift;
1194  int h = s->height >> v_shift;
1195  uint8_t *src = pic_arg->data[i];
1196  uint8_t *dst = pic->f->data[i];
1197  int vpad = 16;
1198 
1199  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1200  && !s->progressive_sequence
1201  && FFALIGN(s->height, 32) - s->height > 16)
1202  vpad = 32;
1203 
1204  if (!s->avctx->rc_buffer_size)
1205  dst += INPLACE_OFFSET;
1206 
1207  if (src_stride == dst_stride)
1208  memcpy(dst, src, src_stride * h);
1209  else {
1210  int h2 = h;
1211  uint8_t *dst2 = dst;
1212  while (h2--) {
1213  memcpy(dst2, src, w);
1214  dst2 += dst_stride;
1215  src += src_stride;
1216  }
1217  }
1218  if ((s->width & 15) || (s->height & (vpad-1))) {
1219  s->mpvencdsp.draw_edges(dst, dst_stride,
1220  w, h,
1221  16 >> h_shift,
1222  vpad >> v_shift,
1223  EDGE_BOTTOM);
1224  }
1225  }
1226  emms_c();
1227  }
1228  }
1229  ret = av_frame_copy_props(pic->f, pic_arg);
1230  if (ret < 0)
1231  return ret;
1232 
1233  pic->f->display_picture_number = display_picture_number;
1234  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1235  } else {
1236  /* Flushing: When we have not received enough input frames,
1237  * ensure s->input_picture[0] contains the first picture */
1238  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1239  if (s->input_picture[flush_offset])
1240  break;
1241 
1242  if (flush_offset <= 1)
1243  flush_offset = 1;
1244  else
1245  encoding_delay = encoding_delay - flush_offset + 1;
1246  }
1247 
1248  /* shift buffer entries */
1249  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1250  s->input_picture[i - flush_offset] = s->input_picture[i];
1251 
1252  s->input_picture[encoding_delay] = (Picture*) pic;
1253 
1254  return 0;
1255 }
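/* If the user-supplied frame already matches the encoder's internal strides
 * and alignment it is referenced directly; otherwise it is copied into an
 * internal picture, with the bottom edge padded via draw_edges() when the
 * dimensions are not multiples of the macroblock size. */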
1256 
1258 {
1259  int x, y, plane;
1260  int score = 0;
1261  int64_t score64 = 0;
1262 
1263  for (plane = 0; plane < 3; plane++) {
1264  const int stride = p->f->linesize[plane];
1265  const int bw = plane ? 1 : 2;
1266  for (y = 0; y < s->mb_height * bw; y++) {
1267  for (x = 0; x < s->mb_width * bw; x++) {
1268  int off = p->shared ? 0 : 16;
1269  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1270  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1271  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1272 
1273  switch (FFABS(s->frame_skip_exp)) {
1274  case 0: score = FFMAX(score, v); break;
1275  case 1: score += FFABS(v); break;
1276  case 2: score64 += v * (int64_t)v; break;
1277  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1278  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1279  }
1280  }
1281  }
1282  }
1283  emms_c();
1284 
1285  if (score)
1286  score64 = score;
1287  if (s->frame_skip_exp < 0)
1288  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1289  -1.0/s->frame_skip_exp);
1290 
1291  if (score64 < s->frame_skip_threshold)
1292  return 1;
1293  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1294  return 1;
1295  return 0;
1296 }
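/* skip_check() compares the new input with the last coded picture using the
 * 8x8 frame_skip_cmp function; frame_skip_exp selects max, sum or a higher
 * power as accumulation, and a negative exponent normalizes the score per
 * macroblock. The frame is skipped when the score stays below
 * frame_skip_threshold or below the lambda-scaled frame_skip_factor. */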
1297 
1299 {
1300  int ret;
1301  int size = 0;
1302 
1304  if (ret < 0)
1305  return ret;
1306 
1307  do {
1309  if (ret >= 0) {
1310  size += pkt->size;
1312  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1313  return ret;
1314  } while (ret >= 0);
1315 
1316  return size;
1317 }
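/* encode_frame() pushes one frame (or a flush) through the scratch encoder
 * used by estimate_best_b_count() and returns the total size in bytes of
 * all packets it produces. */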
1318 
1320 {
1321  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1322  AVPacket *pkt;
1323  const int scale = s->brd_scale;
1324  int width = s->width >> scale;
1325  int height = s->height >> scale;
1326  int i, j, out_size, p_lambda, b_lambda, lambda2;
1327  int64_t best_rd = INT64_MAX;
1328  int best_b_count = -1;
1329  int ret = 0;
1330 
1331  av_assert0(scale >= 0 && scale <= 3);
1332 
1333  pkt = av_packet_alloc();
1334  if (!pkt)
1335  return AVERROR(ENOMEM);
1336 
1337  //emms_c();
1338  //s->next_picture_ptr->quality;
1339  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1340  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1341  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1342  if (!b_lambda) // FIXME we should do this somewhere else
1343  b_lambda = p_lambda;
 1344  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
 1345  FF_LAMBDA_SHIFT;
 1346 
1347  for (i = 0; i < s->max_b_frames + 2; i++) {
1348  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1349  s->next_picture_ptr;
1350  uint8_t *data[4];
1351 
1352  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1353  pre_input = *pre_input_ptr;
1354  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1355 
1356  if (!pre_input.shared && i) {
1357  data[0] += INPLACE_OFFSET;
1358  data[1] += INPLACE_OFFSET;
1359  data[2] += INPLACE_OFFSET;
1360  }
1361 
1362  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1363  s->tmp_frames[i]->linesize[0],
1364  data[0],
1365  pre_input.f->linesize[0],
1366  width, height);
1367  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1368  s->tmp_frames[i]->linesize[1],
1369  data[1],
1370  pre_input.f->linesize[1],
1371  width >> 1, height >> 1);
1372  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1373  s->tmp_frames[i]->linesize[2],
1374  data[2],
1375  pre_input.f->linesize[2],
1376  width >> 1, height >> 1);
1377  }
1378  }
1379 
1380  for (j = 0; j < s->max_b_frames + 1; j++) {
1381  AVCodecContext *c;
1382  int64_t rd = 0;
1383 
1384  if (!s->input_picture[j])
1385  break;
1386 
1388  if (!c) {
1389  ret = AVERROR(ENOMEM);
1390  goto fail;
1391  }
1392 
1393  c->width = width;
1394  c->height = height;
1396  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1397  c->mb_decision = s->avctx->mb_decision;
1398  c->me_cmp = s->avctx->me_cmp;
1399  c->mb_cmp = s->avctx->mb_cmp;
1400  c->me_sub_cmp = s->avctx->me_sub_cmp;
1401  c->pix_fmt = AV_PIX_FMT_YUV420P;
1402  c->time_base = s->avctx->time_base;
1403  c->max_b_frames = s->max_b_frames;
1404 
1405  ret = avcodec_open2(c, codec, NULL);
1406  if (ret < 0)
1407  goto fail;
1408 
1409 
1410  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1411  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1412 
1413  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1414  if (out_size < 0) {
1415  ret = out_size;
1416  goto fail;
1417  }
1418 
1419  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1420 
1421  for (i = 0; i < s->max_b_frames + 1; i++) {
1422  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1423 
1424  s->tmp_frames[i + 1]->pict_type = is_p ?
1426  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1427 
1428  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1429  if (out_size < 0) {
1430  ret = out_size;
1431  goto fail;
1432  }
1433 
1434  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1435  }
1436 
1437  /* get the delayed frames */
1439  if (out_size < 0) {
1440  ret = out_size;
1441  goto fail;
1442  }
1443  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1444 
1445  rd += c->error[0] + c->error[1] + c->error[2];
1446 
1447  if (rd < best_rd) {
1448  best_rd = rd;
1449  best_b_count = j;
1450  }
1451 
1452 fail:
1455  if (ret < 0) {
1456  best_b_count = ret;
1457  break;
1458  }
1459  }
1460 
1461  av_packet_free(&pkt);
1462 
1463  return best_b_count;
1464 }
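/* estimate_best_b_count() (b_frame_strategy == 2) encodes downscaled copies
 * of the queued input frames with every possible number of B-frames between
 * the P-frames and returns the count that minimizes the combined cost of
 * bits (weighted by lambda2) and reconstruction error. */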
1465 
1467 {
1468  int i, ret;
1469 
1470  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1471  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1472  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1473 
1474  /* set next picture type & ordering */
1475  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1476  if (s->frame_skip_threshold || s->frame_skip_factor) {
1477  if (s->picture_in_gop_number < s->gop_size &&
1478  s->next_picture_ptr &&
1479  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1480  // FIXME check that the gop check above is +-1 correct
1481  av_frame_unref(s->input_picture[0]->f);
1482 
1483  ff_vbv_update(s, 0);
1484 
1485  goto no_output_pic;
1486  }
1487  }
1488 
1489  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1490  !s->next_picture_ptr || s->intra_only) {
1491  s->reordered_input_picture[0] = s->input_picture[0];
1492  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1493  s->reordered_input_picture[0]->f->coded_picture_number =
1494  s->coded_picture_number++;
1495  } else {
1496  int b_frames = 0;
1497 
1498  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1499  for (i = 0; i < s->max_b_frames + 1; i++) {
1500  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1501 
1502  if (pict_num >= s->rc_context.num_entries)
1503  break;
1504  if (!s->input_picture[i]) {
1505  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1506  break;
1507  }
1508 
1509  s->input_picture[i]->f->pict_type =
1510  s->rc_context.entry[pict_num].new_pict_type;
1511  }
1512  }
1513 
1514  if (s->b_frame_strategy == 0) {
1515  b_frames = s->max_b_frames;
1516  while (b_frames && !s->input_picture[b_frames])
1517  b_frames--;
1518  } else if (s->b_frame_strategy == 1) {
1519  for (i = 1; i < s->max_b_frames + 1; i++) {
1520  if (s->input_picture[i] &&
1521  s->input_picture[i]->b_frame_score == 0) {
1522  s->input_picture[i]->b_frame_score =
1524  s->input_picture[i ]->f->data[0],
1525  s->input_picture[i - 1]->f->data[0],
1526  s->linesize) + 1;
1527  }
1528  }
1529  for (i = 0; i < s->max_b_frames + 1; i++) {
1530  if (!s->input_picture[i] ||
1531  s->input_picture[i]->b_frame_score - 1 >
1532  s->mb_num / s->b_sensitivity)
1533  break;
1534  }
1535 
1536  b_frames = FFMAX(0, i - 1);
1537 
1538  /* reset scores */
1539  for (i = 0; i < b_frames + 1; i++) {
1540  s->input_picture[i]->b_frame_score = 0;
1541  }
1542  } else if (s->b_frame_strategy == 2) {
1543  b_frames = estimate_best_b_count(s);
1544  if (b_frames < 0)
1545  return b_frames;
1546  }
1547 
1548  emms_c();
1549 
1550  for (i = b_frames - 1; i >= 0; i--) {
1551  int type = s->input_picture[i]->f->pict_type;
1552  if (type && type != AV_PICTURE_TYPE_B)
1553  b_frames = i;
1554  }
1555  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1556  b_frames == s->max_b_frames) {
1557  av_log(s->avctx, AV_LOG_ERROR,
1558  "warning, too many B-frames in a row\n");
1559  }
1560 
1561  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1562  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1563  s->gop_size > s->picture_in_gop_number) {
1564  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1565  } else {
1566  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1567  b_frames = 0;
1568  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1569  }
1570  }
1571 
1572  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1573  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1574  b_frames--;
1575 
1576  s->reordered_input_picture[0] = s->input_picture[b_frames];
1577  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1578  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1579  s->reordered_input_picture[0]->f->coded_picture_number =
1580  s->coded_picture_number++;
1581  for (i = 0; i < b_frames; i++) {
1582  s->reordered_input_picture[i + 1] = s->input_picture[i];
1583  s->reordered_input_picture[i + 1]->f->pict_type =
1585  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1586  s->coded_picture_number++;
1587  }
1588  }
1589  }
1590 no_output_pic:
1591  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1592 
1593  if (s->reordered_input_picture[0]) {
1594  s->reordered_input_picture[0]->reference =
1595  s->reordered_input_picture[0]->f->pict_type !=
1596  AV_PICTURE_TYPE_B ? 3 : 0;
1597 
1598  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1599  return ret;
1600 
1601  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1602  // input is a shared pix, so we can't modify it -> allocate a new
 1603  // one & ensure that the shared one is reusable
1604 
1605  Picture *pic;
1606  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1607  if (i < 0)
1608  return i;
1609  pic = &s->picture[i];
1610 
1611  pic->reference = s->reordered_input_picture[0]->reference;
1612  if (alloc_picture(s, pic, 0) < 0) {
1613  return -1;
1614  }
1615 
1616  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1617  if (ret < 0)
1618  return ret;
1619 
1620  /* mark us unused / free shared pic */
1621  av_frame_unref(s->reordered_input_picture[0]->f);
1622  s->reordered_input_picture[0]->shared = 0;
1623 
1624  s->current_picture_ptr = pic;
1625  } else {
1626  // input is not a shared pix -> reuse buffer for current_pix
1627  s->current_picture_ptr = s->reordered_input_picture[0];
1628  for (i = 0; i < 4; i++) {
1629  if (s->new_picture.f->data[i])
1630  s->new_picture.f->data[i] += INPLACE_OFFSET;
1631  }
1632  }
1633  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1634  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1635  s->current_picture_ptr)) < 0)
1636  return ret;
1637 
1638  s->picture_number = s->new_picture.f->display_picture_number;
1639  }
1640  return 0;
1641 }
1642 
1644 {
1645  if (s->unrestricted_mv &&
1646  s->current_picture.reference &&
1647  !s->intra_only) {
1648  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1649  int hshift = desc->log2_chroma_w;
1650  int vshift = desc->log2_chroma_h;
1651  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1652  s->current_picture.f->linesize[0],
1653  s->h_edge_pos, s->v_edge_pos,
1655  EDGE_TOP | EDGE_BOTTOM);
1656  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1657  s->current_picture.f->linesize[1],
1658  s->h_edge_pos >> hshift,
1659  s->v_edge_pos >> vshift,
1660  EDGE_WIDTH >> hshift,
1661  EDGE_WIDTH >> vshift,
1662  EDGE_TOP | EDGE_BOTTOM);
1663  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1664  s->current_picture.f->linesize[2],
1665  s->h_edge_pos >> hshift,
1666  s->v_edge_pos >> vshift,
1667  EDGE_WIDTH >> hshift,
1668  EDGE_WIDTH >> vshift,
1669  EDGE_TOP | EDGE_BOTTOM);
1670  }
1671 
1672  emms_c();
1673 
1674  s->last_pict_type = s->pict_type;
1675  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1676  if (s->pict_type!= AV_PICTURE_TYPE_B)
1677  s->last_non_b_pict_type = s->pict_type;
1678 
1679 #if FF_API_CODED_FRAME
1681  av_frame_unref(s->avctx->coded_frame);
1682  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1684 #endif
1685 #if FF_API_ERROR_FRAME
1687  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1688  sizeof(s->current_picture.encoding_error));
1690 #endif
1691 }
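/* frame_end() replicates the borders of the reconstructed picture
 * (draw_edges) so unrestricted motion vectors may point outside the coded
 * area, and remembers the lambda used for this picture type. */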
1692 
1694 {
1695  int intra, i;
1696 
1697  for (intra = 0; intra < 2; intra++) {
1698  if (s->dct_count[intra] > (1 << 16)) {
1699  for (i = 0; i < 64; i++) {
1700  s->dct_error_sum[intra][i] >>= 1;
1701  }
1702  s->dct_count[intra] >>= 1;
1703  }
1704 
1705  for (i = 0; i < 64; i++) {
1706  s->dct_offset[intra][i] = (s->noise_reduction *
1707  s->dct_count[intra] +
1708  s->dct_error_sum[intra][i] / 2) /
1709  (s->dct_error_sum[intra][i] + 1);
1710  }
1711  }
1712 }
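/* dct_offset[intra][i] is roughly noise_reduction * dct_count /
 * dct_error_sum for each coefficient; both accumulators are halved once
 * dct_count exceeds 1 << 16 so the estimate keeps adapting. */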
1713 
1715 {
1716  int ret;
1717 
1718  /* mark & release old frames */
1719  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1720  s->last_picture_ptr != s->next_picture_ptr &&
1721  s->last_picture_ptr->f->buf[0]) {
1722  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1723  }
1724 
1725  s->current_picture_ptr->f->pict_type = s->pict_type;
1726  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1727 
1728  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1729  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1730  s->current_picture_ptr)) < 0)
1731  return ret;
1732 
1733  if (s->pict_type != AV_PICTURE_TYPE_B) {
1734  s->last_picture_ptr = s->next_picture_ptr;
1735  if (!s->droppable)
1736  s->next_picture_ptr = s->current_picture_ptr;
1737  }
1738 
1739  if (s->last_picture_ptr) {
1740  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1741  if (s->last_picture_ptr->f->buf[0] &&
1742  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1743  s->last_picture_ptr)) < 0)
1744  return ret;
1745  }
1746  if (s->next_picture_ptr) {
1747  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1748  if (s->next_picture_ptr->f->buf[0] &&
1749  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1750  s->next_picture_ptr)) < 0)
1751  return ret;
1752  }
1753 
1754  if (s->picture_structure!= PICT_FRAME) {
1755  int i;
1756  for (i = 0; i < 4; i++) {
1757  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1758  s->current_picture.f->data[i] +=
1759  s->current_picture.f->linesize[i];
1760  }
1761  s->current_picture.f->linesize[i] *= 2;
1762  s->last_picture.f->linesize[i] *= 2;
1763  s->next_picture.f->linesize[i] *= 2;
1764  }
1765  }
1766 
1767  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1768  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1769  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1770  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1771  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1772  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1773  } else {
1774  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1775  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1776  }
1777 
1778  if (s->dct_error_sum) {
1779  av_assert2(s->noise_reduction && s->encoding);
1781  }
1782 
1783  return 0;
1784 }
1785 
1787  const AVFrame *pic_arg, int *got_packet)
1788 {
1790  int i, stuffing_count, ret;
1791  int context_count = s->slice_context_count;
1792 
1793  s->vbv_ignore_qmax = 0;
1794 
1795  s->picture_in_gop_number++;
1796 
1797  if (load_input_picture(s, pic_arg) < 0)
1798  return -1;
1799 
1800  if (select_input_picture(s) < 0) {
1801  return -1;
1802  }
1803 
1804  /* output? */
1805  if (s->new_picture.f->data[0]) {
1806  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1807  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1808  :
1809  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1810  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1811  return ret;
1812  if (s->mb_info) {
1813  s->mb_info_ptr = av_packet_new_side_data(pkt,
1815  s->mb_width*s->mb_height*12);
1816  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1817  }
1818 
1819  for (i = 0; i < context_count; i++) {
1820  int start_y = s->thread_context[i]->start_mb_y;
1821  int end_y = s->thread_context[i]-> end_mb_y;
1822  int h = s->mb_height;
1823  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1824  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1825 
1826  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1827  }
1828 
1829  s->pict_type = s->new_picture.f->pict_type;
1830  //emms_c();
1831  ret = frame_start(s);
1832  if (ret < 0)
1833  return ret;
1834 vbv_retry:
1835  ret = encode_picture(s, s->picture_number);
1836  if (growing_buffer) {
1837  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1838  pkt->data = s->pb.buf;
1840  }
1841  if (ret < 0)
1842  return -1;
1843 
1844 #if FF_API_STAT_BITS
1846  avctx->header_bits = s->header_bits;
1847  avctx->mv_bits = s->mv_bits;
1848  avctx->misc_bits = s->misc_bits;
1849  avctx->i_tex_bits = s->i_tex_bits;
1850  avctx->p_tex_bits = s->p_tex_bits;
1851  avctx->i_count = s->i_count;
1852  // FIXME f/b_count in avctx
1853  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1854  avctx->skip_count = s->skip_count;
1856 #endif
1857 
1858  frame_end(s);
1859 
1860  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1861  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1862 
1863  if (avctx->rc_buffer_size) {
1864  RateControlContext *rcc = &s->rc_context;
1865  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1866  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1867  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1868 
1869  if (put_bits_count(&s->pb) > max_size &&
1870  s->lambda < s->lmax) {
1871  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1872  (s->qscale + 1) / s->qscale);
1873  if (s->adaptive_quant) {
1874  int i;
1875  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1876  s->lambda_table[i] =
1877  FFMAX(s->lambda_table[i] + min_step,
1878  s->lambda_table[i] * (s->qscale + 1) /
1879  s->qscale);
1880  }
1881  s->mb_skipped = 0; // done in frame_start()
1882  // done in encode_picture() so we must undo it
1883  if (s->pict_type == AV_PICTURE_TYPE_P) {
1884  if (s->flipflop_rounding ||
1885  s->codec_id == AV_CODEC_ID_H263P ||
1886  s->codec_id == AV_CODEC_ID_MPEG4)
1887  s->no_rounding ^= 1;
1888  }
1889  if (s->pict_type != AV_PICTURE_TYPE_B) {
1890  s->time_base = s->last_time_base;
1891  s->last_non_b_time = s->time - s->pp_time;
1892  }
1893  for (i = 0; i < context_count; i++) {
1894  PutBitContext *pb = &s->thread_context[i]->pb;
1895  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1896  }
1897  s->vbv_ignore_qmax = 1;
1898  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1899  goto vbv_retry;
1900  }
1901 
1903  }
1904 
1907 
1908  for (i = 0; i < 4; i++) {
1909  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1910  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1911  }
1912  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1913  s->current_picture_ptr->encoding_error,
1914  (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1915  s->pict_type);
1916 
1918  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1919  s->misc_bits + s->i_tex_bits +
1920  s->p_tex_bits);
1921  flush_put_bits(&s->pb);
1922  s->frame_bits = put_bits_count(&s->pb);
1923 
1924  stuffing_count = ff_vbv_update(s, s->frame_bits);
1925  s->stuffing_bits = 8*stuffing_count;
1926  if (stuffing_count) {
1927  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1928  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1929  return -1;
1930  }
1931 
1932  switch (s->codec_id) {
1935  while (stuffing_count--) {
1936  put_bits(&s->pb, 8, 0);
1937  }
1938  break;
1939  case AV_CODEC_ID_MPEG4:
1940  put_bits(&s->pb, 16, 0);
1941  put_bits(&s->pb, 16, 0x1C3);
1942  stuffing_count -= 4;
1943  while (stuffing_count--) {
1944  put_bits(&s->pb, 8, 0xFF);
1945  }
1946  break;
1947  default:
1948  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1949  }
1950  flush_put_bits(&s->pb);
1951  s->frame_bits = put_bits_count(&s->pb);
1952  }
1953 
1954  /* update MPEG-1/2 vbv_delay for CBR */
1955  if (avctx->rc_max_rate &&
1956  avctx->rc_min_rate == avctx->rc_max_rate &&
1957  s->out_format == FMT_MPEG1 &&
1958  90000LL * (avctx->rc_buffer_size - 1) <=
1959  avctx->rc_max_rate * 0xFFFFLL) {
1960  AVCPBProperties *props;
1961  size_t props_size;
1962 
1963  int vbv_delay, min_delay;
1964  double inbits = avctx->rc_max_rate *
1965  av_q2d(avctx->time_base);
1966  int minbits = s->frame_bits - 8 *
1967  (s->vbv_delay_ptr - s->pb.buf - 1);
1968  double bits = s->rc_context.buffer_index + minbits - inbits;
1969 
1970  if (bits < 0)
1971  av_log(avctx, AV_LOG_ERROR,
1972  "Internal error, negative bits\n");
1973 
1974  av_assert1(s->repeat_first_field == 0);
1975 
1976  vbv_delay = bits * 90000 / avctx->rc_max_rate;
1977  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1978  avctx->rc_max_rate;
1979 
1980  vbv_delay = FFMAX(vbv_delay, min_delay);
1981 
1982  av_assert0(vbv_delay < 0xFFFF);
1983 
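            /* The 16-bit vbv_delay value is not byte-aligned in the picture
             * header: its 3 MSBs go into the low bits of the first byte at
             * vbv_delay_ptr, the next 8 bits fill the second byte, and the
             * lowest 5 bits occupy the top of the third byte. */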
1984  s->vbv_delay_ptr[0] &= 0xF8;
1985  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1986  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1987  s->vbv_delay_ptr[2] &= 0x07;
1988  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1989 
1990  props = av_cpb_properties_alloc(&props_size);
1991  if (!props)
1992  return AVERROR(ENOMEM);
1993  props->vbv_delay = vbv_delay * 300;
1994 
1995  ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
1996  (uint8_t*)props, props_size);
1997  if (ret < 0) {
1998  av_freep(&props);
1999  return ret;
2000  }
2001 
2002 #if FF_API_VBV_DELAY
2004  avctx->vbv_delay = vbv_delay * 300;
2006 #endif
2007  }
2008  s->total_bits += s->frame_bits;
2009 #if FF_API_STAT_BITS
2011  avctx->frame_bits = s->frame_bits;
2013 #endif
2014 
2015 
2016  pkt->pts = s->current_picture.f->pts;
2017  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2018  if (!s->current_picture.f->coded_picture_number)
2019  pkt->dts = pkt->pts - s->dts_delta;
2020  else
2021  pkt->dts = s->reordered_pts;
2022  s->reordered_pts = pkt->pts;
2023  } else
2024  pkt->dts = pkt->pts;
2025  if (s->current_picture.f->key_frame)
2026  pkt->flags |= AV_PKT_FLAG_KEY;
2027  if (s->mb_info)
2028  av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2029  } else {
2030  s->frame_bits = 0;
2031  }
2032 
2033  /* release non-reference frames */
2034  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2035  if (!s->picture[i].reference)
2036  ff_mpeg_unref_picture(avctx, &s->picture[i]);
2037  }
2038 
2039  av_assert1((s->frame_bits & 7) == 0);
2040 
2041  pkt->size = s->frame_bits / 8;
2042  *got_packet = !!pkt->size;
2043  return 0;
2044 }
2045 
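/* Small-coefficient elimination: if a block contains only +-1 coefficients and
 * their score (tab[] indexed by the zero-run preceding each of them) stays
 * below the threshold, the whole block is judged not worth its bits and is
 * zeroed. A negative threshold additionally allows the DC coefficient to be
 * eliminated. */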
2046 static void dct_single_coeff_elimination(MpegEncContext *s,
2047  int n, int threshold)
2048 {
2049  static const char tab[64] = {
2050  3, 2, 2, 1, 1, 1, 1, 1,
2051  1, 1, 1, 1, 1, 1, 1, 1,
2052  1, 1, 1, 1, 1, 1, 1, 1,
2053  0, 0, 0, 0, 0, 0, 0, 0,
2054  0, 0, 0, 0, 0, 0, 0, 0,
2055  0, 0, 0, 0, 0, 0, 0, 0,
2056  0, 0, 0, 0, 0, 0, 0, 0,
2057  0, 0, 0, 0, 0, 0, 0, 0
2058  };
2059  int score = 0;
2060  int run = 0;
2061  int i;
2062  int16_t *block = s->block[n];
2063  const int last_index = s->block_last_index[n];
2064  int skip_dc;
2065 
2066  if (threshold < 0) {
2067  skip_dc = 0;
2068  threshold = -threshold;
2069  } else
2070  skip_dc = 1;
2071 
2072  /* Are all the coefficients we could set to zero already zero? */
2073  if (last_index <= skip_dc - 1)
2074  return;
2075 
2076  for (i = 0; i <= last_index; i++) {
2077  const int j = s->intra_scantable.permutated[i];
2078  const int level = FFABS(block[j]);
2079  if (level == 1) {
2080  if (skip_dc && i == 0)
2081  continue;
2082  score += tab[run];
2083  run = 0;
2084  } else if (level > 1) {
2085  return;
2086  } else {
2087  run++;
2088  }
2089  }
2090  if (score >= threshold)
2091  return;
2092  for (i = skip_dc; i <= last_index; i++) {
2093  const int j = s->intra_scantable.permutated[i];
2094  block[j] = 0;
2095  }
2096  if (block[0])
2097  s->block_last_index[n] = 0;
2098  else
2099  s->block_last_index[n] = -1;
2100 }
2101 
2102 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2103  int last_index)
2104 {
2105  int i;
2106  const int maxlevel = s->max_qcoeff;
2107  const int minlevel = s->min_qcoeff;
2108  int overflow = 0;
2109 
2110  if (s->mb_intra) {
2111  i = 1; // skip clipping of intra dc
2112  } else
2113  i = 0;
2114 
2115  for (; i <= last_index; i++) {
2116  const int j = s->intra_scantable.permutated[i];
2117  int level = block[j];
2118 
2119  if (level > maxlevel) {
2120  level = maxlevel;
2121  overflow++;
2122  } else if (level < minlevel) {
2123  level = minlevel;
2124  overflow++;
2125  }
2126 
2127  block[j] = level;
2128  }
2129 
2130  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2131  av_log(s->avctx, AV_LOG_INFO,
2132  "warning, clipping %d dct coefficients to %d..%d\n",
2133  overflow, minlevel, maxlevel);
2134 }
2135 
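/* For every pixel of an 8x8 block, compute roughly 36 times the standard
 * deviation of its 3x3 neighbourhood (clipped at the block borders). The
 * resulting weights are fed to dct_quantize_refine() when quantizer noise
 * shaping is enabled. */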
2136 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2137 {
2138  int x, y;
2139  // FIXME optimize
2140  for (y = 0; y < 8; y++) {
2141  for (x = 0; x < 8; x++) {
2142  int x2, y2;
2143  int sum = 0;
2144  int sqr = 0;
2145  int count = 0;
2146 
2147  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2148  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2149  int v = ptr[x2 + y2 * stride];
2150  sum += v;
2151  sqr += v * v;
2152  count++;
2153  }
2154  }
2155  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2156  }
2157  }
2158 }
2159 
2160 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2161  int motion_x, int motion_y,
2162  int mb_block_height,
2163  int mb_block_width,
2164  int mb_block_count)
2165 {
2166  int16_t weight[12][64];
2167  int16_t orig[12][64];
2168  const int mb_x = s->mb_x;
2169  const int mb_y = s->mb_y;
2170  int i;
2171  int skip_dct[12];
2172  int dct_offset = s->linesize * 8; // default for progressive frames
2173  int uv_dct_offset = s->uvlinesize * 8;
2174  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2175  ptrdiff_t wrap_y, wrap_c;
2176 
2177  for (i = 0; i < mb_block_count; i++)
2178  skip_dct[i] = s->skipdct;
2179 
2180  if (s->adaptive_quant) {
2181  const int last_qp = s->qscale;
2182  const int mb_xy = mb_x + mb_y * s->mb_stride;
2183 
2184  s->lambda = s->lambda_table[mb_xy];
2185  update_qscale(s);
2186 
2187  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2188  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2189  s->dquant = s->qscale - last_qp;
2190 
2191  if (s->out_format == FMT_H263) {
2192  s->dquant = av_clip(s->dquant, -2, 2);
2193 
2194  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2195  if (!s->mb_intra) {
2196  if (s->pict_type == AV_PICTURE_TYPE_B) {
2197  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2198  s->dquant = 0;
2199  }
2200  if (s->mv_type == MV_TYPE_8X8)
2201  s->dquant = 0;
2202  }
2203  }
2204  }
2205  }
2206  ff_set_qscale(s, last_qp + s->dquant);
2207  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2208  ff_set_qscale(s, s->qscale + s->dquant);
2209 
2210  wrap_y = s->linesize;
2211  wrap_c = s->uvlinesize;
2212  ptr_y = s->new_picture.f->data[0] +
2213  (mb_y * 16 * wrap_y) + mb_x * 16;
2214  ptr_cb = s->new_picture.f->data[1] +
2215  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2216  ptr_cr = s->new_picture.f->data[2] +
2217  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2218 
2219  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2220  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2221  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2222  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2223  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2224  wrap_y, wrap_y,
2225  16, 16, mb_x * 16, mb_y * 16,
2226  s->width, s->height);
2227  ptr_y = ebuf;
2228  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2229  wrap_c, wrap_c,
2230  mb_block_width, mb_block_height,
2231  mb_x * mb_block_width, mb_y * mb_block_height,
2232  cw, ch);
2233  ptr_cb = ebuf + 16 * wrap_y;
2234  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2235  wrap_c, wrap_c,
2236  mb_block_width, mb_block_height,
2237  mb_x * mb_block_width, mb_y * mb_block_height,
2238  cw, ch);
2239  ptr_cr = ebuf + 16 * wrap_y + 16;
2240  }
2241 
2242  if (s->mb_intra) {
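        /* Frame vs. field DCT decision: compare an interlace metric on the two
         * 8-line halves (frame DCT) with the same metric on the two fields
         * (field DCT); if the field split scores better, set interlaced_dct and
         * re-address the rows with doubled stride. */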
2243  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2244  int progressive_score, interlaced_score;
2245 
2246  s->interlaced_dct = 0;
2247  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2248  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2249  NULL, wrap_y, 8) - 400;
2250 
2251  if (progressive_score > 0) {
2252  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2253  NULL, wrap_y * 2, 8) +
2254  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2255  NULL, wrap_y * 2, 8);
2256  if (progressive_score > interlaced_score) {
2257  s->interlaced_dct = 1;
2258 
2259  dct_offset = wrap_y;
2260  uv_dct_offset = wrap_c;
2261  wrap_y <<= 1;
2262  if (s->chroma_format == CHROMA_422 ||
2263  s->chroma_format == CHROMA_444)
2264  wrap_c <<= 1;
2265  }
2266  }
2267  }
2268 
2269  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2270  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2271  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2272  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2273 
2274  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2275  skip_dct[4] = 1;
2276  skip_dct[5] = 1;
2277  } else {
2278  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2279  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2280  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2281  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2282  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2283  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2284  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2285  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2286  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2287  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2288  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2289  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2290  }
2291  }
2292  } else {
2293  op_pixels_func (*op_pix)[4];
2294  qpel_mc_func (*op_qpix)[16];
2295  uint8_t *dest_y, *dest_cb, *dest_cr;
2296 
2297  dest_y = s->dest[0];
2298  dest_cb = s->dest[1];
2299  dest_cr = s->dest[2];
2300 
2301  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2302  op_pix = s->hdsp.put_pixels_tab;
2303  op_qpix = s->qdsp.put_qpel_pixels_tab;
2304  } else {
2305  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2306  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2307  }
2308 
2309  if (s->mv_dir & MV_DIR_FORWARD) {
2310  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2311  s->last_picture.f->data,
2312  op_pix, op_qpix);
2313  op_pix = s->hdsp.avg_pixels_tab;
2314  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2315  }
2316  if (s->mv_dir & MV_DIR_BACKWARD) {
2317  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2318  s->next_picture.f->data,
2319  op_pix, op_qpix);
2320  }
2321 
2322  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2323  int progressive_score, interlaced_score;
2324 
2325  s->interlaced_dct = 0;
2326  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2327  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2328  ptr_y + wrap_y * 8,
2329  wrap_y, 8) - 400;
2330 
2331  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2332  progressive_score -= 400;
2333 
2334  if (progressive_score > 0) {
2335  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2336  wrap_y * 2, 8) +
2337  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2338  ptr_y + wrap_y,
2339  wrap_y * 2, 8);
2340 
2341  if (progressive_score > interlaced_score) {
2342  s->interlaced_dct = 1;
2343 
2344  dct_offset = wrap_y;
2345  uv_dct_offset = wrap_c;
2346  wrap_y <<= 1;
2347  if (s->chroma_format == CHROMA_422)
2348  wrap_c <<= 1;
2349  }
2350  }
2351  }
2352 
2353  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2354  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2355  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2356  dest_y + dct_offset, wrap_y);
2357  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2358  dest_y + dct_offset + 8, wrap_y);
2359 
2360  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2361  skip_dct[4] = 1;
2362  skip_dct[5] = 1;
2363  } else {
2364  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2365  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2366  if (!s->chroma_y_shift) { /* 422 */
2367  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2368  dest_cb + uv_dct_offset, wrap_c);
2369  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2370  dest_cr + uv_dct_offset, wrap_c);
2371  }
2372  }
2373  /* pre quantization */
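            /* Skip the DCT for blocks whose SAD against the prediction is small
             * relative to qscale; such residuals would quantize to (almost) all
             * zeros anyway, so they are simply marked as not coded. */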
2374  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2375  2 * s->qscale * s->qscale) {
2376  // FIXME optimize
2377  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2378  skip_dct[0] = 1;
2379  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2380  skip_dct[1] = 1;
2381  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2382  wrap_y, 8) < 20 * s->qscale)
2383  skip_dct[2] = 1;
2384  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2385  wrap_y, 8) < 20 * s->qscale)
2386  skip_dct[3] = 1;
2387  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2388  skip_dct[4] = 1;
2389  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2390  skip_dct[5] = 1;
2391  if (!s->chroma_y_shift) { /* 422 */
2392  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2393  dest_cb + uv_dct_offset,
2394  wrap_c, 8) < 20 * s->qscale)
2395  skip_dct[6] = 1;
2396  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2397  dest_cr + uv_dct_offset,
2398  wrap_c, 8) < 20 * s->qscale)
2399  skip_dct[7] = 1;
2400  }
2401  }
2402  }
2403 
2404  if (s->quantizer_noise_shaping) {
2405  if (!skip_dct[0])
2406  get_visual_weight(weight[0], ptr_y , wrap_y);
2407  if (!skip_dct[1])
2408  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2409  if (!skip_dct[2])
2410  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2411  if (!skip_dct[3])
2412  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2413  if (!skip_dct[4])
2414  get_visual_weight(weight[4], ptr_cb , wrap_c);
2415  if (!skip_dct[5])
2416  get_visual_weight(weight[5], ptr_cr , wrap_c);
2417  if (!s->chroma_y_shift) { /* 422 */
2418  if (!skip_dct[6])
2419  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2420  wrap_c);
2421  if (!skip_dct[7])
2422  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2423  wrap_c);
2424  }
2425  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2426  }
2427 
2428  /* DCT & quantize */
2429  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2430  {
2431  for (i = 0; i < mb_block_count; i++) {
2432  if (!skip_dct[i]) {
2433  int overflow;
2434  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2435  // FIXME we could decide to change to quantizer instead of
2436  // clipping
2437  // JS: I don't think that would be a good idea it could lower
2438  // quality instead of improve it. Just INTRADC clipping
2439  // deserves changes in quantizer
2440  if (overflow)
2441  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2442  } else
2443  s->block_last_index[i] = -1;
2444  }
2445  if (s->quantizer_noise_shaping) {
2446  for (i = 0; i < mb_block_count; i++) {
2447  if (!skip_dct[i]) {
2448  s->block_last_index[i] =
2449  dct_quantize_refine(s, s->block[i], weight[i],
2450  orig[i], i, s->qscale);
2451  }
2452  }
2453  }
2454 
2455  if (s->luma_elim_threshold && !s->mb_intra)
2456  for (i = 0; i < 4; i++)
2457  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2458  if (s->chroma_elim_threshold && !s->mb_intra)
2459  for (i = 4; i < mb_block_count; i++)
2460  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2461 
2462  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2463  for (i = 0; i < mb_block_count; i++) {
2464  if (s->block_last_index[i] == -1)
2465  s->coded_score[i] = INT_MAX / 256;
2466  }
2467  }
2468  }
2469 
2470  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2471  s->block_last_index[4] =
2472  s->block_last_index[5] = 0;
2473  s->block[4][0] =
2474  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2475  if (!s->chroma_y_shift) { /* 422 / 444 */
2476  for (i=6; i<12; i++) {
2477  s->block_last_index[i] = 0;
2478  s->block[i][0] = s->block[4][0];
2479  }
2480  }
2481  }
2482 
2483  // FIXME: the non-C quantize code returns an incorrect block_last_index
2484  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2485  for (i = 0; i < mb_block_count; i++) {
2486  int j;
2487  if (s->block_last_index[i] > 0) {
2488  for (j = 63; j > 0; j--) {
2489  if (s->block[i][s->intra_scantable.permutated[j]])
2490  break;
2491  }
2492  s->block_last_index[i] = j;
2493  }
2494  }
2495  }
2496 
2497  /* huffman encode */
2498  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2499  case AV_CODEC_ID_MPEG1VIDEO:
2500  case AV_CODEC_ID_MPEG2VIDEO:
2501  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2502  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2503  break;
2504  case AV_CODEC_ID_MPEG4:
2505  if (CONFIG_MPEG4_ENCODER)
2506  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2507  break;
2508  case AV_CODEC_ID_MSMPEG4V2:
2509  case AV_CODEC_ID_MSMPEG4V3:
2510  case AV_CODEC_ID_WMV1:
2511  if (CONFIG_MSMPEG4_ENCODER)
2512  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2513  break;
2514  case AV_CODEC_ID_WMV2:
2515  if (CONFIG_WMV2_ENCODER)
2516  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2517  break;
2518  case AV_CODEC_ID_H261:
2519  if (CONFIG_H261_ENCODER)
2520  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2521  break;
2522  case AV_CODEC_ID_H263:
2523  case AV_CODEC_ID_H263P:
2524  case AV_CODEC_ID_FLV1:
2525  case AV_CODEC_ID_RV10:
2526  case AV_CODEC_ID_RV20:
2527  if (CONFIG_H263_ENCODER)
2528  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2529  break;
2530 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2531  case AV_CODEC_ID_MJPEG:
2532  case AV_CODEC_ID_AMV:
2533  ff_mjpeg_encode_mb(s, s->block);
2534  break;
2535 #endif
2536  case AV_CODEC_ID_SPEEDHQ:
2537  if (CONFIG_SPEEDHQ_ENCODER)
2538  ff_speedhq_encode_mb(s, s->block);
2539  break;
2540  default:
2541  av_assert1(0);
2542  }
2543 }
2544 
2545 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2546 {
2547  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2548  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2549  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2550 }
2551 
2552 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2553  int i;
2554 
2555  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2556 
2557  /* MPEG-1 */
2558  d->mb_skip_run= s->mb_skip_run;
2559  for(i=0; i<3; i++)
2560  d->last_dc[i] = s->last_dc[i];
2561 
2562  /* statistics */
2563  d->mv_bits= s->mv_bits;
2564  d->i_tex_bits= s->i_tex_bits;
2565  d->p_tex_bits= s->p_tex_bits;
2566  d->i_count= s->i_count;
2567  d->f_count= s->f_count;
2568  d->b_count= s->b_count;
2569  d->skip_count= s->skip_count;
2570  d->misc_bits= s->misc_bits;
2571  d->last_bits= 0;
2572 
2573  d->mb_skipped= 0;
2574  d->qscale= s->qscale;
2575  d->dquant= s->dquant;
2576 
2577  d->esc3_level_length= s->esc3_level_length;
2578 }
2579 
2580 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2581  int i;
2582 
2583  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2584  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2585 
2586  /* MPEG-1 */
2587  d->mb_skip_run= s->mb_skip_run;
2588  for(i=0; i<3; i++)
2589  d->last_dc[i] = s->last_dc[i];
2590 
2591  /* statistics */
2592  d->mv_bits= s->mv_bits;
2593  d->i_tex_bits= s->i_tex_bits;
2594  d->p_tex_bits= s->p_tex_bits;
2595  d->i_count= s->i_count;
2596  d->f_count= s->f_count;
2597  d->b_count= s->b_count;
2598  d->skip_count= s->skip_count;
2599  d->misc_bits= s->misc_bits;
2600 
2601  d->mb_intra= s->mb_intra;
2602  d->mb_skipped= s->mb_skipped;
2603  d->mv_type= s->mv_type;
2604  d->mv_dir= s->mv_dir;
2605  d->pb= s->pb;
2606  if(s->data_partitioning){
2607  d->pb2= s->pb2;
2608  d->tex_pb= s->tex_pb;
2609  }
2610  d->block= s->block;
2611  for(i=0; i<8; i++)
2612  d->block_last_index[i]= s->block_last_index[i];
2613  d->interlaced_dct= s->interlaced_dct;
2614  d->qscale= s->qscale;
2615 
2616  d->esc3_level_length= s->esc3_level_length;
2617 }
2618 
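/* Trial-encode one macroblock for the mode decision: encode into one of two
 * scratch bit buffers (and, when reconstructing, into a scratch pixel buffer),
 * score the result by bit count alone or, in full RD mode, by lambda2 * bits
 * plus the reconstruction SSE, and keep it in *best if it beats *dmin. */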
2619 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2620  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2621  int *dmin, int *next_block, int motion_x, int motion_y)
2622 {
2623  int score;
2624  uint8_t *dest_backup[3];
2625 
2626  copy_context_before_encode(s, backup, type);
2627 
2628  s->block= s->blocks[*next_block];
2629  s->pb= pb[*next_block];
2630  if(s->data_partitioning){
2631  s->pb2 = pb2 [*next_block];
2632  s->tex_pb= tex_pb[*next_block];
2633  }
2634 
2635  if(*next_block){
2636  memcpy(dest_backup, s->dest, sizeof(s->dest));
2637  s->dest[0] = s->sc.rd_scratchpad;
2638  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2639  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2640  av_assert0(s->linesize >= 32); //FIXME
2641  }
2642 
2643  encode_mb(s, motion_x, motion_y);
2644 
2645  score= put_bits_count(&s->pb);
2646  if(s->data_partitioning){
2647  score+= put_bits_count(&s->pb2);
2648  score+= put_bits_count(&s->tex_pb);
2649  }
2650 
2651  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2652  ff_mpv_reconstruct_mb(s, s->block);
2653 
2654  score *= s->lambda2;
2655  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2656  }
2657 
2658  if(*next_block){
2659  memcpy(s->dest, dest_backup, sizeof(s->dest));
2660  }
2661 
2662  if(score<*dmin){
2663  *dmin= score;
2664  *next_block^=1;
2665 
2666  copy_context_after_encode(best, s, type);
2667  }
2668 }
2669 
2670 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2671  const uint32_t *sq = ff_square_tab + 256;
2672  int acc=0;
2673  int x,y;
2674 
2675  if(w==16 && h==16)
2676  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2677  else if(w==8 && h==8)
2678  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2679 
2680  for(y=0; y<h; y++){
2681  for(x=0; x<w; x++){
2682  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2683  }
2684  }
2685 
2686  av_assert2(acc>=0);
2687 
2688  return acc;
2689 }
2690 
2691 static int sse_mb(MpegEncContext *s){
2692  int w= 16;
2693  int h= 16;
2694 
2695  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2696  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2697 
2698  if(w==16 && h==16)
2699  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2700  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2701  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2702  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2703  }else{
2704  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2705  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2706  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2707  }
2708  else
2709  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2710  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2711  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2712 }
2713 
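/* Motion-estimation workers, one per slice context (run via avctx->execute()):
 * the pre-pass below scans its rows bottom-up with the pre_dia_size search,
 * while estimate_motion_thread() picks a motion vector and candidate MB type
 * for every macroblock. */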
2714 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2715  MpegEncContext *s= *(void**)arg;
2716 
2717 
2718  s->me.pre_pass=1;
2719  s->me.dia_size= s->avctx->pre_dia_size;
2720  s->first_slice_line=1;
2721  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2722  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2723  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2724  }
2725  s->first_slice_line=0;
2726  }
2727 
2728  s->me.pre_pass=0;
2729 
2730  return 0;
2731 }
2732 
2733 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2734  MpegEncContext *s= *(void**)arg;
2735 
2736  s->me.dia_size= s->avctx->dia_size;
2737  s->first_slice_line=1;
2738  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2739  s->mb_x=0; //for block init below
2740  ff_init_block_index(s);
2741  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2742  s->block_index[0]+=2;
2743  s->block_index[1]+=2;
2744  s->block_index[2]+=2;
2745  s->block_index[3]+=2;
2746 
2747  /* compute motion vector & mb_type and store in context */
2748  if(s->pict_type==AV_PICTURE_TYPE_B)
2749  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2750  else
2751  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2752  }
2753  s->first_slice_line=0;
2754  }
2755  return 0;
2756 }
2757 
2758 static int mb_var_thread(AVCodecContext *c, void *arg){
2759  MpegEncContext *s= *(void**)arg;
2760  int mb_x, mb_y;
2761 
2762  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2763  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2764  int xx = mb_x * 16;
2765  int yy = mb_y * 16;
2766  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2767  int varc;
2768  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2769 
2770  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2771  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2772 
2773  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2774  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2775  s->me.mb_var_sum_temp += varc;
2776  }
2777  }
2778  return 0;
2779 }
2780 
2781 static void write_slice_end(MpegEncContext *s){
2782  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2783  if(s->partitioned_frame){
2784  ff_mpeg4_merge_partitions(s);
2785  }
2786 
2787  ff_mpeg4_stuffing(&s->pb);
2788  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2789  s->out_format == FMT_MJPEG) {
2790  ff_mjpeg_encode_stuffing(s);
2791  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2792  ff_speedhq_end_slice(s);
2793  }
2794 
2795  flush_put_bits(&s->pb);
2796 
2797  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2798  s->misc_bits+= get_bits_diff(s);
2799 }
2800 
2801 static void write_mb_info(MpegEncContext *s)
2802 {
2803  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2804  int offset = put_bits_count(&s->pb);
2805  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2806  int gobn = s->mb_y / s->gob_index;
2807  int pred_x, pred_y;
2808  if (CONFIG_H263_ENCODER)
2809  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2810  bytestream_put_le32(&ptr, offset);
2811  bytestream_put_byte(&ptr, s->qscale);
2812  bytestream_put_byte(&ptr, gobn);
2813  bytestream_put_le16(&ptr, mba);
2814  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2815  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2816  /* 4MV not implemented */
2817  bytestream_put_byte(&ptr, 0); /* hmv2 */
2818  bytestream_put_byte(&ptr, 0); /* vmv2 */
2819 }
2820 
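/* H.263 mb_info side data: roughly every s->mb_info bytes of bitstream a
 * 12-byte record (bit offset, qscale, GOB number, MB address, predicted MVs)
 * is appended so that a packetizer can later split the frame at macroblock
 * boundaries. */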
2821 static void update_mb_info(MpegEncContext *s, int startcode)
2822 {
2823  if (!s->mb_info)
2824  return;
2825  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2826  s->mb_info_size += 12;
2827  s->prev_mb_info = s->last_mb_info;
2828  }
2829  if (startcode) {
2830  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2831  /* This might have incremented mb_info_size above, and we return without
2832  * actually writing any info into that slot yet. But in that case, this
2833  * function will be called again right after the start code is written,
2834  * and the MB info will be written then. */
2835  return;
2836  }
2837 
2838  s->last_mb_info = put_bytes_count(&s->pb, 0);
2839  if (!s->mb_info_size)
2840  s->mb_info_size += 12;
2841  write_mb_info(s);
2842 }
2843 
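/* Grow the shared output buffer when fewer than threshold bytes remain:
 * allocate a larger internal byte_buffer, copy the bits written so far, rebase
 * the PutBitContext and re-point ptr_lastgob / vbv_delay_ptr into the new
 * buffer. Only done when a single slice context owns the buffer. */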
2844 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2845 {
2846  if (put_bytes_left(&s->pb, 0) < threshold
2847  && s->slice_context_count == 1
2848  && s->pb.buf == s->avctx->internal->byte_buffer) {
2849  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2850  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2851 
2852  uint8_t *new_buffer = NULL;
2853  int new_buffer_size = 0;
2854 
2855  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2856  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2857  return AVERROR(ENOMEM);
2858  }
2859 
2860  emms_c();
2861 
2862  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2863  s->avctx->internal->byte_buffer_size + size_increase);
2864  if (!new_buffer)
2865  return AVERROR(ENOMEM);
2866 
2867  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2868  av_free(s->avctx->internal->byte_buffer);
2869  s->avctx->internal->byte_buffer = new_buffer;
2870  s->avctx->internal->byte_buffer_size = new_buffer_size;
2871  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2872  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2873  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2874  }
2875  if (put_bytes_left(&s->pb, 0) < threshold)
2876  return AVERROR(EINVAL);
2877  return 0;
2878 }
2879 
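/* Per-slice encoding worker: walks the macroblocks of its slice, writes
 * GOB/slice/video-packet headers where needed, and for each MB either encodes
 * the single possible mode directly or trial-encodes every candidate mode with
 * encode_mb_hq() and keeps the cheapest one. */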
2880 static int encode_thread(AVCodecContext *c, void *arg){
2881  MpegEncContext *s= *(void**)arg;
2882  int mb_x, mb_y, mb_y_order;
2883  int chr_h= 16>>s->chroma_y_shift;
2884  int i, j;
2885  MpegEncContext best_s = { 0 }, backup_s;
2886  uint8_t bit_buf[2][MAX_MB_BYTES];
2887  uint8_t bit_buf2[2][MAX_MB_BYTES];
2888  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2889  PutBitContext pb[2], pb2[2], tex_pb[2];
2890 
2891  for(i=0; i<2; i++){
2892  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2893  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2894  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2895  }
2896 
2897  s->last_bits= put_bits_count(&s->pb);
2898  s->mv_bits=0;
2899  s->misc_bits=0;
2900  s->i_tex_bits=0;
2901  s->p_tex_bits=0;
2902  s->i_count=0;
2903  s->f_count=0;
2904  s->b_count=0;
2905  s->skip_count=0;
2906 
2907  for(i=0; i<3; i++){
2908  /* init last dc values */
2909  /* note: quant matrix value (8) is implied here */
2910  s->last_dc[i] = 128 << s->intra_dc_precision;
2911 
2912  s->current_picture.encoding_error[i] = 0;
2913  }
2914  if(s->codec_id==AV_CODEC_ID_AMV){
2915  s->last_dc[0] = 128*8/13;
2916  s->last_dc[1] = 128*8/14;
2917  s->last_dc[2] = 128*8/14;
2918  }
2919  s->mb_skip_run = 0;
2920  memset(s->last_mv, 0, sizeof(s->last_mv));
2921 
2922  s->last_mv_dir = 0;
2923 
2924  switch(s->codec_id){
2925  case AV_CODEC_ID_H263:
2926  case AV_CODEC_ID_H263P:
2927  case AV_CODEC_ID_FLV1:
2928  if (CONFIG_H263_ENCODER)
2929  s->gob_index = H263_GOB_HEIGHT(s->height);
2930  break;
2931  case AV_CODEC_ID_MPEG4:
2932  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2933  ff_mpeg4_init_partitions(s);
2934  break;
2935  }
2936 
2937  s->resync_mb_x=0;
2938  s->resync_mb_y=0;
2939  s->first_slice_line = 1;
2940  s->ptr_lastgob = s->pb.buf;
2941  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2942  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2943  int first_in_slice;
2944  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2945  if (first_in_slice && mb_y_order != s->start_mb_y)
2946  ff_speedhq_end_slice(s);
2947  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2948  } else {
2949  mb_y = mb_y_order;
2950  }
2951  s->mb_x=0;
2952  s->mb_y= mb_y;
2953 
2954  ff_set_qscale(s, s->qscale);
2955  ff_init_block_index(s);
2956 
2957  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2958  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2959  int mb_type= s->mb_type[xy];
2960 // int d;
2961  int dmin= INT_MAX;
2962  int dir;
2963  int size_increase = s->avctx->internal->byte_buffer_size/4
2964  + s->mb_width*MAX_MB_BYTES;
2965 
2966  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2967  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2968  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2969  return -1;
2970  }
2971  if(s->data_partitioning){
2972  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2973  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2974  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2975  return -1;
2976  }
2977  }
2978 
2979  s->mb_x = mb_x;
2980  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2981  ff_update_block_index(s);
2982 
2983  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2984  ff_h261_reorder_mb_index(s);
2985  xy= s->mb_y*s->mb_stride + s->mb_x;
2986  mb_type= s->mb_type[xy];
2987  }
2988 
2989  /* write gob / video packet header */
2990  if(s->rtp_mode){
2991  int current_packet_size, is_gob_start;
2992 
2993  current_packet_size = put_bytes_count(&s->pb, 1)
2994  - (s->ptr_lastgob - s->pb.buf);
2995 
2996  is_gob_start = s->rtp_payload_size &&
2997  current_packet_size >= s->rtp_payload_size &&
2998  mb_y + mb_x > 0;
2999 
3000  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3001 
3002  switch(s->codec_id){
3003  case AV_CODEC_ID_H263:
3004  case AV_CODEC_ID_H263P:
3005  if(!s->h263_slice_structured)
3006  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3007  break;
3008  case AV_CODEC_ID_MPEG2VIDEO:
3009  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3010  case AV_CODEC_ID_MPEG1VIDEO:
3011  if(s->mb_skip_run) is_gob_start=0;
3012  break;
3013  case AV_CODEC_ID_MJPEG:
3014  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3015  break;
3016  }
3017 
3018  if(is_gob_start){
3019  if(s->start_mb_y != mb_y || mb_x!=0){
3020  write_slice_end(s);
3021 
3022  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3023  ff_mpeg4_init_partitions(s);
3024  }
3025  }
3026 
3027  av_assert2((put_bits_count(&s->pb)&7) == 0);
3028  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3029 
3030  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3031  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3032  int d = 100 / s->error_rate;
3033  if(r % d == 0){
3034  current_packet_size=0;
3035  s->pb.buf_ptr= s->ptr_lastgob;
3036  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3037  }
3038  }
3039 
3040 #if FF_API_RTP_CALLBACK
3042  if (s->avctx->rtp_callback){
3043  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3044  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3045  }
3047 #endif
3048  update_mb_info(s, 1);
3049 
3050  switch(s->codec_id){
3051  case AV_CODEC_ID_MPEG4:
3052  if (CONFIG_MPEG4_ENCODER) {
3053  ff_mpeg4_encode_video_packet_header(s);
3054  ff_mpeg4_clean_buffers(s);
3055  }
3056  break;
3057  case AV_CODEC_ID_MPEG1VIDEO:
3058  case AV_CODEC_ID_MPEG2VIDEO:
3059  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3060  ff_mpeg1_encode_slice_header(s);
3061  ff_mpeg1_clean_buffers(s);
3062  }
3063  break;
3064  case AV_CODEC_ID_H263:
3065  case AV_CODEC_ID_H263P:
3066  if (CONFIG_H263_ENCODER)
3067  ff_h263_encode_gob_header(s, mb_y);
3068  break;
3069  }
3070 
3071  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3072  int bits= put_bits_count(&s->pb);
3073  s->misc_bits+= bits - s->last_bits;
3074  s->last_bits= bits;
3075  }
3076 
3077  s->ptr_lastgob += current_packet_size;
3078  s->first_slice_line=1;
3079  s->resync_mb_x=mb_x;
3080  s->resync_mb_y=mb_y;
3081  }
3082  }
3083 
3084  if( (s->resync_mb_x == s->mb_x)
3085  && s->resync_mb_y+1 == s->mb_y){
3086  s->first_slice_line=0;
3087  }
3088 
3089  s->mb_skipped=0;
3090  s->dquant=0; //only for QP_RD
3091 
3092  update_mb_info(s, 0);
3093 
3094  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3095  int next_block=0;
3096  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3097 
3098  copy_context_before_encode(&backup_s, s, -1);
3099  backup_s.pb= s->pb;
3100  best_s.data_partitioning= s->data_partitioning;
3101  best_s.partitioned_frame= s->partitioned_frame;
3102  if(s->data_partitioning){
3103  backup_s.pb2= s->pb2;
3104  backup_s.tex_pb= s->tex_pb;
3105  }
3106 
3107  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3108  s->mv_dir = MV_DIR_FORWARD;
3109  s->mv_type = MV_TYPE_16X16;
3110  s->mb_intra= 0;
3111  s->mv[0][0][0] = s->p_mv_table[xy][0];
3112  s->mv[0][0][1] = s->p_mv_table[xy][1];
3113  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3114  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3115  }
3116  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3117  s->mv_dir = MV_DIR_FORWARD;
3118  s->mv_type = MV_TYPE_FIELD;
3119  s->mb_intra= 0;
3120  for(i=0; i<2; i++){
3121  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3122  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3123  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3124  }
3125  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3126  &dmin, &next_block, 0, 0);
3127  }
3128  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3129  s->mv_dir = MV_DIR_FORWARD;
3130  s->mv_type = MV_TYPE_16X16;
3131  s->mb_intra= 0;
3132  s->mv[0][0][0] = 0;
3133  s->mv[0][0][1] = 0;
3134  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3135  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3136  }
3137  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3138  s->mv_dir = MV_DIR_FORWARD;
3139  s->mv_type = MV_TYPE_8X8;
3140  s->mb_intra= 0;
3141  for(i=0; i<4; i++){
3142  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3143  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3144  }
3145  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3146  &dmin, &next_block, 0, 0);
3147  }
3148  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3149  s->mv_dir = MV_DIR_FORWARD;
3150  s->mv_type = MV_TYPE_16X16;
3151  s->mb_intra= 0;
3152  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3153  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3154  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3155  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3156  }
3157  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3158  s->mv_dir = MV_DIR_BACKWARD;
3159  s->mv_type = MV_TYPE_16X16;
3160  s->mb_intra= 0;
3161  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3162  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3163  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3164  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3165  }
3166  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3167  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3168  s->mv_type = MV_TYPE_16X16;
3169  s->mb_intra= 0;
3170  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3171  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3172  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3173  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3174  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3175  &dmin, &next_block, 0, 0);
3176  }
3177  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3178  s->mv_dir = MV_DIR_FORWARD;
3179  s->mv_type = MV_TYPE_FIELD;
3180  s->mb_intra= 0;
3181  for(i=0; i<2; i++){
3182  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3183  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3184  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3185  }
3186  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3187  &dmin, &next_block, 0, 0);
3188  }
3189  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3190  s->mv_dir = MV_DIR_BACKWARD;
3191  s->mv_type = MV_TYPE_FIELD;
3192  s->mb_intra= 0;
3193  for(i=0; i<2; i++){
3194  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3195  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3196  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3197  }
3198  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3199  &dmin, &next_block, 0, 0);
3200  }
3201  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3202  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3203  s->mv_type = MV_TYPE_FIELD;
3204  s->mb_intra= 0;
3205  for(dir=0; dir<2; dir++){
3206  for(i=0; i<2; i++){
3207  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3208  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3209  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3210  }
3211  }
3212  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3213  &dmin, &next_block, 0, 0);
3214  }
3215  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3216  s->mv_dir = 0;
3217  s->mv_type = MV_TYPE_16X16;
3218  s->mb_intra= 1;
3219  s->mv[0][0][0] = 0;
3220  s->mv[0][0][1] = 0;
3221  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3222  &dmin, &next_block, 0, 0);
3223  if(s->h263_pred || s->h263_aic){
3224  if(best_s.mb_intra)
3225  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3226  else
3227  ff_clean_intra_table_entries(s); //old mode?
3228  }
3229  }
3230 
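                /* QP_RD: re-encode the best 16x16 candidate with dquant in
                 * {-1,+1,-2,+2} to also pick the best per-MB qscale, saving the
                 * DC/AC prediction state before each trial and restoring it
                 * when the trial is rejected. */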
3231  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3232  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3233  const int last_qp= backup_s.qscale;
3234  int qpi, qp, dc[6];
3235  int16_t ac[6][16];
3236  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3237  static const int dquant_tab[4]={-1,1,-2,2};
3238  int storecoefs = s->mb_intra && s->dc_val[0];
3239 
3240  av_assert2(backup_s.dquant == 0);
3241 
3242  //FIXME intra
3243  s->mv_dir= best_s.mv_dir;
3244  s->mv_type = MV_TYPE_16X16;
3245  s->mb_intra= best_s.mb_intra;
3246  s->mv[0][0][0] = best_s.mv[0][0][0];
3247  s->mv[0][0][1] = best_s.mv[0][0][1];
3248  s->mv[1][0][0] = best_s.mv[1][0][0];
3249  s->mv[1][0][1] = best_s.mv[1][0][1];
3250 
3251  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3252  for(; qpi<4; qpi++){
3253  int dquant= dquant_tab[qpi];
3254  qp= last_qp + dquant;
3255  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3256  continue;
3257  backup_s.dquant= dquant;
3258  if(storecoefs){
3259  for(i=0; i<6; i++){
3260  dc[i]= s->dc_val[0][ s->block_index[i] ];
3261  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3262  }
3263  }
3264 
3265  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3266  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3267  if(best_s.qscale != qp){
3268  if(storecoefs){
3269  for(i=0; i<6; i++){
3270  s->dc_val[0][ s->block_index[i] ]= dc[i];
3271  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3272  }
3273  }
3274  }
3275  }
3276  }
3277  }
3278  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3279  int mx= s->b_direct_mv_table[xy][0];
3280  int my= s->b_direct_mv_table[xy][1];
3281 
3282  backup_s.dquant = 0;
3283  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3284  s->mb_intra= 0;
3285  ff_mpeg4_set_direct_mv(s, mx, my);
3286  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3287  &dmin, &next_block, mx, my);
3288  }
3289  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3290  backup_s.dquant = 0;
3291  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3292  s->mb_intra= 0;
3293  ff_mpeg4_set_direct_mv(s, 0, 0);
3294  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3295  &dmin, &next_block, 0, 0);
3296  }
3297  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3298  int coded=0;
3299  for(i=0; i<6; i++)
3300  coded |= s->block_last_index[i];
3301  if(coded){
3302  int mx,my;
3303  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3304  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3305  mx=my=0; //FIXME find the one we actually used
3306  ff_mpeg4_set_direct_mv(s, mx, my);
3307  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3308  mx= s->mv[1][0][0];
3309  my= s->mv[1][0][1];
3310  }else{
3311  mx= s->mv[0][0][0];
3312  my= s->mv[0][0][1];
3313  }
3314 
3315  s->mv_dir= best_s.mv_dir;
3316  s->mv_type = best_s.mv_type;
3317  s->mb_intra= 0;
3318 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3319  s->mv[0][0][1] = best_s.mv[0][0][1];
3320  s->mv[1][0][0] = best_s.mv[1][0][0];
3321  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3322  backup_s.dquant= 0;
3323  s->skipdct=1;
3324  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3325  &dmin, &next_block, mx, my);
3326  s->skipdct=0;
3327  }
3328  }
3329 
3330  s->current_picture.qscale_table[xy] = best_s.qscale;
3331 
3332  copy_context_after_encode(s, &best_s, -1);
3333 
3334  pb_bits_count= put_bits_count(&s->pb);
3335  flush_put_bits(&s->pb);
3336  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3337  s->pb= backup_s.pb;
3338 
3339  if(s->data_partitioning){
3340  pb2_bits_count= put_bits_count(&s->pb2);
3341  flush_put_bits(&s->pb2);
3342  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3343  s->pb2= backup_s.pb2;
3344 
3345  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3346  flush_put_bits(&s->tex_pb);
3347  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3348  s->tex_pb= backup_s.tex_pb;
3349  }
3350  s->last_bits= put_bits_count(&s->pb);
3351 
3352  if (CONFIG_H263_ENCODER &&
3353  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3354  ff_h263_update_motion_val(s);
3355 
3356  if(next_block==0){ //FIXME 16 vs linesize16
3357  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3358  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3359  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3360  }
3361 
3362  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3363  ff_mpv_reconstruct_mb(s, s->block);
3364  } else {
3365  int motion_x = 0, motion_y = 0;
3366  s->mv_type=MV_TYPE_16X16;
3367  // only one MB-Type possible
3368 
3369  switch(mb_type){
3370  case CANDIDATE_MB_TYPE_INTRA:
3371  s->mv_dir = 0;
3372  s->mb_intra= 1;
3373  motion_x= s->mv[0][0][0] = 0;
3374  motion_y= s->mv[0][0][1] = 0;
3375  break;
3376  case CANDIDATE_MB_TYPE_INTER:
3377  s->mv_dir = MV_DIR_FORWARD;
3378  s->mb_intra= 0;
3379  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3380  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3381  break;
3382  case CANDIDATE_MB_TYPE_INTER_I:
3383  s->mv_dir = MV_DIR_FORWARD;
3384  s->mv_type = MV_TYPE_FIELD;
3385  s->mb_intra= 0;
3386  for(i=0; i<2; i++){
3387  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3388  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3389  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3390  }
3391  break;
3392  case CANDIDATE_MB_TYPE_INTER4V:
3393  s->mv_dir = MV_DIR_FORWARD;
3394  s->mv_type = MV_TYPE_8X8;
3395  s->mb_intra= 0;
3396  for(i=0; i<4; i++){
3397  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3398  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3399  }
3400  break;
3401  case CANDIDATE_MB_TYPE_DIRECT:
3402  if (CONFIG_MPEG4_ENCODER) {
3403  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3404  s->mb_intra= 0;
3405  motion_x=s->b_direct_mv_table[xy][0];
3406  motion_y=s->b_direct_mv_table[xy][1];
3407  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3408  }
3409  break;
3410  case CANDIDATE_MB_TYPE_DIRECT0:
3411  if (CONFIG_MPEG4_ENCODER) {
3412  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3413  s->mb_intra= 0;
3414  ff_mpeg4_set_direct_mv(s, 0, 0);
3415  }
3416  break;
3417  case CANDIDATE_MB_TYPE_BIDIR:
3418  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3419  s->mb_intra= 0;
3420  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3421  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3422  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3423  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3424  break;
3425  case CANDIDATE_MB_TYPE_BACKWARD:
3426  s->mv_dir = MV_DIR_BACKWARD;
3427  s->mb_intra= 0;
3428  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3429  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3430  break;
3431  case CANDIDATE_MB_TYPE_FORWARD:
3432  s->mv_dir = MV_DIR_FORWARD;
3433  s->mb_intra= 0;
3434  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3435  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3436  break;
3437  case CANDIDATE_MB_TYPE_FORWARD_I:
3438  s->mv_dir = MV_DIR_FORWARD;
3439  s->mv_type = MV_TYPE_FIELD;
3440  s->mb_intra= 0;
3441  for(i=0; i<2; i++){
3442  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3443  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3444  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3445  }
3446  break;
3447  case CANDIDATE_MB_TYPE_BACKWARD_I:
3448  s->mv_dir = MV_DIR_BACKWARD;
3449  s->mv_type = MV_TYPE_FIELD;
3450  s->mb_intra= 0;
3451  for(i=0; i<2; i++){
3452  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3453  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3454  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3455  }
3456  break;
3457  case CANDIDATE_MB_TYPE_BIDIR_I:
3458  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3459  s->mv_type = MV_TYPE_FIELD;
3460  s->mb_intra= 0;
3461  for(dir=0; dir<2; dir++){
3462  for(i=0; i<2; i++){
3463  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3464  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3465  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3466  }
3467  }
3468  break;
3469  default:
3470  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3471  }
3472 
3473  encode_mb(s, motion_x, motion_y);
3474 
3475  // RAL: Update last macroblock type
3476  s->last_mv_dir = s->mv_dir;
3477 
3478  if (CONFIG_H263_ENCODER &&
3479  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3480  ff_h263_update_motion_val(s);
3481 
3482  ff_mpv_reconstruct_mb(s, s->block);
3483  }
3484 
3485  /* clean the MV table in I/P/S frames for direct mode in B-frames */
3486  if(s->mb_intra /* && I,P,S_TYPE */){
3487  s->p_mv_table[xy][0]=0;
3488  s->p_mv_table[xy][1]=0;
3489  }
3490 
3491  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3492  int w= 16;
3493  int h= 16;
3494 
3495  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3496  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3497 
3498  s->current_picture.encoding_error[0] += sse(
3499  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3500  s->dest[0], w, h, s->linesize);
3501  s->current_picture.encoding_error[1] += sse(
3502  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3503  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3504  s->current_picture.encoding_error[2] += sse(
3505  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3506  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3507  }
3508  if(s->loop_filter){
3509  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3510  ff_h263_loop_filter(s);
3511  }
3512  ff_dlog(s->avctx, "MB %d %d bits\n",
3513  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3514  }
3515  }
3516 
3517  //not beautiful here but we must write it before flushing so it has to be here
3518  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3519  ff_msmpeg4_encode_ext_header(s);
3520 
3521  write_slice_end(s);
3522 
3523 #if FF_API_RTP_CALLBACK
3525  /* Send the last GOB if RTP */
3526  if (s->avctx->rtp_callback) {
3527  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3528  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3529  /* Call the RTP callback to send the last GOB */
3530  emms_c();
3531  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3532  }
3534 #endif
3535 
3536  return 0;
3537 }
3538 
3539 #define MERGE(field) dst->field += src->field; src->field=0
3540 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3541  MERGE(me.scene_change_score);
3542  MERGE(me.mc_mb_var_sum_temp);
3543  MERGE(me.mb_var_sum_temp);
3544 }
3545 
3546 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3547  int i;
3548 
3549  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3550  MERGE(dct_count[1]);
3551  MERGE(mv_bits);
3552  MERGE(i_tex_bits);
3553  MERGE(p_tex_bits);
3554  MERGE(i_count);
3555  MERGE(f_count);
3556  MERGE(b_count);
3557  MERGE(skip_count);
3558  MERGE(misc_bits);
3559  MERGE(er.error_count);
3560  MERGE(padding_bug_score);
3561  MERGE(current_picture.encoding_error[0]);
3562  MERGE(current_picture.encoding_error[1]);
3563  MERGE(current_picture.encoding_error[2]);
3564 
3565  if (dst->noise_reduction){
3566  for(i=0; i<64; i++){
3567  MERGE(dct_error_sum[0][i]);
3568  MERGE(dct_error_sum[1][i]);
3569  }
3570  }
3571 
3572  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3573  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3574  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3575  flush_put_bits(&dst->pb);
3576 }
3577 
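/* Pick the frame-level quantizer: an explicitly scheduled s->next_lambda (e.g.
 * from a VBV re-encode) takes precedence, otherwise the rate controller is
 * asked; with adaptive quantization the per-MB qscale table is then cleaned up
 * per codec. */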
3578 static int estimate_qp(MpegEncContext *s, int dry_run){
3579  if (s->next_lambda){
3580  s->current_picture_ptr->f->quality =
3581  s->current_picture.f->quality = s->next_lambda;
3582  if(!dry_run) s->next_lambda= 0;
3583  } else if (!s->fixed_qscale) {
3584  int quality = ff_rate_estimate_qscale(s, dry_run);
3585  s->current_picture_ptr->f->quality =
3586  s->current_picture.f->quality = quality;
3587  if (s->current_picture.f->quality < 0)
3588  return -1;
3589  }
3590 
3591  if(s->adaptive_quant){
3592  switch(s->codec_id){
3593  case AV_CODEC_ID_MPEG4:
3594  if (CONFIG_MPEG4_ENCODER)
3595  ff_clean_mpeg4_qscales(s);
3596  break;
3597  case AV_CODEC_ID_H263:
3598  case AV_CODEC_ID_H263P:
3599  case AV_CODEC_ID_FLV1:
3600  if (CONFIG_H263_ENCODER)
3601  ff_clean_h263_qscales(s);
3602  break;
3603  default:
3604  ff_init_qscale_tab(s);
3605  }
3606 
3607  s->lambda= s->lambda_table[0];
3608  //FIXME broken
3609  }else
3610  s->lambda = s->current_picture.f->quality;
3611  update_qscale(s);
3612  return 0;
3613 }
3614 
3615 /* must be called before writing the header */
3616 static void set_frame_distances(MpegEncContext * s){
3617  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3618  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3619 
3620  if(s->pict_type==AV_PICTURE_TYPE_B){
3621  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3622  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3623  }else{
3624  s->pp_time= s->time - s->last_non_b_time;
3625  s->last_non_b_time= s->time;
3626  av_assert1(s->picture_number==0 || s->pp_time > 0);
3627  }
3628 }
3629 
3630 static int encode_picture(MpegEncContext *s, int picture_number)
3631 {
3632  int i, ret;
3633  int bits;
3634  int context_count = s->slice_context_count;
3635 
3636  s->picture_number = picture_number;
3637 
3638  /* Reset the average MB variance */
3639  s->me.mb_var_sum_temp =
3640  s->me.mc_mb_var_sum_temp = 0;
3641 
3642  /* we need to initialize some time vars before we can encode B-frames */
3643  // RAL: Condition added for MPEG1VIDEO
3644  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3645  set_frame_distances(s);
3646  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3647  ff_set_mpeg4_time(s);
3648 
3649  s->me.scene_change_score=0;
3650 
3651 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3652 
3653  if(s->pict_type==AV_PICTURE_TYPE_I){
3654  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3655  else s->no_rounding=0;
3656  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3657  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3658  s->no_rounding ^= 1;
3659  }
3660 
3661  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3662  if (estimate_qp(s,1) < 0)
3663  return -1;
3664  ff_get_2pass_fcode(s);
3665  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3666  if(s->pict_type==AV_PICTURE_TYPE_B)
3667  s->lambda= s->last_lambda_for[s->pict_type];
3668  else
3669  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3670  update_qscale(s);
3671  }
3672 
3673  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3674  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3675  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3676  s->q_chroma_intra_matrix = s->q_intra_matrix;
3677  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3678  }
3679 
3680  s->mb_intra=0; //for the rate distortion & bit compare functions
3681  for(i=1; i<context_count; i++){
3682  ret = ff_update_duplicate_context(s->thread_context[i], s);
3683  if (ret < 0)
3684  return ret;
3685  }
3686 
3687  if(ff_init_me(s)<0)
3688  return -1;
3689 
3690  /* Estimate motion for every MB */
3691  if(s->pict_type != AV_PICTURE_TYPE_I){
3692  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3693  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3694  if (s->pict_type != AV_PICTURE_TYPE_B) {
3695  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3696  s->me_pre == 2) {
3697  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3698  }
3699  }
3700 
3701  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3702  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3703  /* I-Frame */
3704  for(i=0; i<s->mb_stride*s->mb_height; i++)
3705  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3706 
3707  if(!s->fixed_qscale){
3708  /* finding spatial complexity for I-frame rate control */
3709  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3710  }
3711  }
3712  for(i=1; i<context_count; i++){
3713  merge_context_after_me(s, s->thread_context[i]);
3714  }
3715  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3716  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3717  emms_c();
3718 
3719  if (s->me.scene_change_score > s->scenechange_threshold &&
3720  s->pict_type == AV_PICTURE_TYPE_P) {
3721  s->pict_type= AV_PICTURE_TYPE_I;
3722  for(i=0; i<s->mb_stride*s->mb_height; i++)
3723  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3724  if(s->msmpeg4_version >= 3)
3725  s->no_rounding=1;
3726  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3727  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3728  }
3729 
3730  if(!s->umvplus){
3731  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3732  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3733 
3734  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3735  int a,b;
3736  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3737  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3738  s->f_code= FFMAX3(s->f_code, a, b);
3739  }
3740 
3741  ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3742  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3743  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3744  int j;
3745  for(i=0; i<2; i++){
3746  for(j=0; j<2; j++)
3747  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3748  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3749  }
3750  }
3751  }
3752 
3753  if(s->pict_type==AV_PICTURE_TYPE_B){
3754  int a, b;
3755 
3756  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3757  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3758  s->f_code = FFMAX(a, b);
3759 
3760  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3761  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3762  s->b_code = FFMAX(a, b);
3763 
3764  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3765  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3766  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3767  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3768  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3769  int dir, j;
3770  for(dir=0; dir<2; dir++){
3771  for(i=0; i<2; i++){
3772  for(j=0; j<2; j++){
3773  int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3774  : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3775  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3776  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3777  }
3778  }
3779  }
3780  }
3781  }
3782  }
3783 
3784  if (estimate_qp(s, 0) < 0)
3785  return -1;
3786 
3787  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3788  s->pict_type == AV_PICTURE_TYPE_I &&
3789  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3790  s->qscale= 3; //reduce clipping problems
3791 
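 /* (M)JPEG has no per-macroblock quantiser, so the chosen qscale is folded
  * into the quantisation matrices themselves (matrix[i] * qscale / 8) and the
  * encoder then runs with the fixed, neutral qscale of 8. */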
3792  if (s->out_format == FMT_MJPEG) {
3793  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3794  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3795 
3796  if (s->avctx->intra_matrix) {
3797  chroma_matrix =
3798  luma_matrix = s->avctx->intra_matrix;
3799  }
3800  if (s->avctx->chroma_intra_matrix)
3801  chroma_matrix = s->avctx->chroma_intra_matrix;
3802 
3803  /* for mjpeg, we do include qscale in the matrix */
3804  for(i=1;i<64;i++){
3805  int j = s->idsp.idct_permutation[i];
3806 
3807  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3808  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3809  }
3810  s->y_dc_scale_table=
3811  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3812  s->chroma_intra_matrix[0] =
3813  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3814  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3815  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3816  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3817  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3818  s->qscale= 8;
3819  }
3820  if(s->codec_id == AV_CODEC_ID_AMV){
3821  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3822  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3823  for(i=1;i<64;i++){
3824  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3825 
3826  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3827  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3828  }
3829  s->y_dc_scale_table= y;
3830  s->c_dc_scale_table= c;
3831  s->intra_matrix[0] = 13;
3832  s->chroma_intra_matrix[0] = 14;
3833  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3834  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3835  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3836  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3837  s->qscale= 8;
3838  }
3839 
3840  if (s->out_format == FMT_SPEEDHQ) {
3841  s->y_dc_scale_table=
3842  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3843  }
3844 
3845  //FIXME var duplication
3846  s->current_picture_ptr->f->key_frame =
3847  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3848  s->current_picture_ptr->f->pict_type =
3849  s->current_picture.f->pict_type = s->pict_type;
3850 
3851  if (s->current_picture.f->key_frame)
3852  s->picture_in_gop_number=0;
3853 
3854  s->mb_x = s->mb_y = 0;
3855  s->last_bits= put_bits_count(&s->pb);
3856  switch(s->out_format) {
3857 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3858  case FMT_MJPEG:
3859  /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3860  if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3861  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3862  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3863  break;
3864 #endif
3865  case FMT_SPEEDHQ:
3866  if (CONFIG_SPEEDHQ_ENCODER)
3867  ff_speedhq_encode_picture_header(s);
3868  break;
3869  case FMT_H261:
3870  if (CONFIG_H261_ENCODER)
3871  ff_h261_encode_picture_header(s, picture_number);
3872  break;
3873  case FMT_H263:
3874  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3875  ff_wmv2_encode_picture_header(s, picture_number);
3876  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3877  ff_msmpeg4_encode_picture_header(s, picture_number);
3878  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3879  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3880  if (ret < 0)
3881  return ret;
3882  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3883  ret = ff_rv10_encode_picture_header(s, picture_number);
3884  if (ret < 0)
3885  return ret;
3886  }
3887  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3888  ff_rv20_encode_picture_header(s, picture_number);
3889  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3890  ff_flv_encode_picture_header(s, picture_number);
3891  else if (CONFIG_H263_ENCODER)
3892  ff_h263_encode_picture_header(s, picture_number);
3893  break;
3894  case FMT_MPEG1:
3895  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3896  ff_mpeg1_encode_picture_header(s, picture_number);
3897  break;
3898  default:
3899  av_assert0(0);
3900  }
3901  bits= put_bits_count(&s->pb);
3902  s->header_bits= bits - s->last_bits;
3903 
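 /* Each slice thread encodes its share of macroblock rows into its own bit
  * buffer; afterwards the per-thread bitstreams and statistics are merged
  * back into the main context. */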
3904  for(i=1; i<context_count; i++){
3905  update_duplicate_context_after_me(s->thread_context[i], s);
3906  }
3907  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3908  for(i=1; i<context_count; i++){
3909  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3910  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3911  merge_context_after_encode(s, s->thread_context[i]);
3912  }
3913  emms_c();
3914  return 0;
3915 }
3916 
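/* Adaptive DCT noise reduction (used when noise reduction is enabled): for
 * every coefficient position, dct_error_sum accumulates the magnitudes seen
 * so far, and dct_offset (derived from those statistics elsewhere in the
 * encoder) is subtracted from each coefficient towards zero, so that small,
 * noise-like coefficients vanish before quantisation. */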
3917 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3918  const int intra= s->mb_intra;
3919  int i;
3920 
3921  s->dct_count[intra]++;
3922 
3923  for(i=0; i<64; i++){
3924  int level= block[i];
3925 
3926  if(level){
3927  if(level>0){
3928  s->dct_error_sum[intra][i] += level;
3929  level -= s->dct_offset[intra][i];
3930  if(level<0) level=0;
3931  }else{
3932  s->dct_error_sum[intra][i] -= level;
3933  level += s->dct_offset[intra][i];
3934  if(level>0) level=0;
3935  }
3936  block[i]= level;
3937  }
3938  }
3939 }
3940 
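/* Rate-distortion optimised ("trellis") quantisation. For each coefficient in
 * scan order up to the last one that survives plain quantisation, the two
 * nearest quantised magnitudes are considered (zero is covered implicitly by
 * extending the run), and a Viterbi-style search over (run, level) pairs
 * minimises
 *
 *     score_tab[i+1] = min over surviving j and candidate levels l of
 *                      score_tab[j] + distortion(i, l) + lambda * bits(i - j, l)
 *
 * where distortion is the squared reconstruction error in the transform
 * domain (relative to coding a zero) and the bit counts come from the codec's
 * VLC length tables. survivor[] prunes start positions that can no longer
 * lead to a better path. */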
3941 static int dct_quantize_trellis_c(MpegEncContext *s,
3942  int16_t *block, int n,
3943  int qscale, int *overflow){
3944  const int *qmat;
3945  const uint16_t *matrix;
3946  const uint8_t *scantable;
3947  const uint8_t *perm_scantable;
3948  int max=0;
3949  unsigned int threshold1, threshold2;
3950  int bias=0;
3951  int run_tab[65];
3952  int level_tab[65];
3953  int score_tab[65];
3954  int survivor[65];
3955  int survivor_count;
3956  int last_run=0;
3957  int last_level=0;
3958  int last_score= 0;
3959  int last_i;
3960  int coeff[2][64];
3961  int coeff_count[64];
3962  int qmul, qadd, start_i, last_non_zero, i, dc;
3963  const int esc_length= s->ac_esc_length;
3964  uint8_t * length;
3965  uint8_t * last_length;
3966  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3967  int mpeg2_qscale;
3968 
3969  s->fdsp.fdct(block);
3970 
3971  if(s->dct_error_sum)
3972  s->denoise_dct(s, block);
3973  qmul= qscale*16;
3974  qadd= ((qscale-1)|1)*8;
3975 
3976  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3977  else mpeg2_qscale = qscale << 1;
3978 
3979  if (s->mb_intra) {
3980  int q;
3981  scantable= s->intra_scantable.scantable;
3982  perm_scantable= s->intra_scantable.permutated;
3983  if (!s->h263_aic) {
3984  if (n < 4)
3985  q = s->y_dc_scale;
3986  else
3987  q = s->c_dc_scale;
3988  q = q << 3;
3989  } else{
3990  /* For AIC we skip quant/dequant of INTRADC */
3991  q = 1 << 3;
3992  qadd=0;
3993  }
3994 
3995  /* note: block[0] is assumed to be positive */
3996  block[0] = (block[0] + (q >> 1)) / q;
3997  start_i = 1;
3998  last_non_zero = 0;
3999  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4000  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4001  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4002  bias= 1<<(QMAT_SHIFT-1);
4003 
4004  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4005  length = s->intra_chroma_ac_vlc_length;
4006  last_length= s->intra_chroma_ac_vlc_last_length;
4007  } else {
4008  length = s->intra_ac_vlc_length;
4009  last_length= s->intra_ac_vlc_last_length;
4010  }
4011  } else {
4012  scantable= s->inter_scantable.scantable;
4013  perm_scantable= s->inter_scantable.permutated;
4014  start_i = 0;
4015  last_non_zero = -1;
4016  qmat = s->q_inter_matrix[qscale];
4017  matrix = s->inter_matrix;
4018  length = s->inter_ac_vlc_length;
4019  last_length= s->inter_ac_vlc_last_length;
4020  }
4021  last_i= start_i;
4022 
4023  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4024  threshold2= (threshold1<<1);
4025 
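 /* threshold1/threshold2 turn "does this coefficient quantise to zero?" into
  * a single unsigned comparison: adding threshold1 maps the zero-quantising
  * range of level*qmat onto [0, threshold2], so anything outside that range
  * survives quantisation. The reverse scan below locates the last surviving
  * coefficient. */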
4026  for(i=63; i>=start_i; i--) {
4027  const int j = scantable[i];
4028  int level = block[j] * qmat[j];
4029 
4030  if(((unsigned)(level+threshold1))>threshold2){
4031  last_non_zero = i;
4032  break;
4033  }
4034  }
4035 
4036  for(i=start_i; i<=last_non_zero; i++) {
4037  const int j = scantable[i];
4038  int level = block[j] * qmat[j];
4039 
4040 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4041 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4042  if(((unsigned)(level+threshold1))>threshold2){
4043  if(level>0){
4044  level= (bias + level)>>QMAT_SHIFT;
4045  coeff[0][i]= level;
4046  coeff[1][i]= level-1;
4047 // coeff[2][k]= level-2;
4048  }else{
4049  level= (bias - level)>>QMAT_SHIFT;
4050  coeff[0][i]= -level;
4051  coeff[1][i]= -level+1;
4052 // coeff[2][k]= -level+2;
4053  }
4054  coeff_count[i]= FFMIN(level, 2);
4055  av_assert2(coeff_count[i]);
4056  max |=level;
4057  }else{
4058  coeff[0][i]= (level>>31)|1;
4059  coeff_count[i]= 1;
4060  }
4061  }
4062 
4063  *overflow= s->max_qcoeff < max; //overflow might have happened
4064 
4065  if(last_non_zero < start_i){
4066  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4067  return last_non_zero;
4068  }
4069 
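 /* Dynamic programming over scan positions: score_tab[i] is the cheapest cost
  * of coding everything before position i, run_tab[]/level_tab[] remember the
  * decision that achieved it, and survivor[] holds the start positions that
  * are still worth extending. */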
4070  score_tab[start_i]= 0;
4071  survivor[0]= start_i;
4072  survivor_count= 1;
4073 
4074  for(i=start_i; i<=last_non_zero; i++){
4075  int level_index, j, zero_distortion;
4076  int dct_coeff= FFABS(block[ scantable[i] ]);
4077  int best_score=256*256*256*120;
4078 
4079  if (s->fdsp.fdct == ff_fdct_ifast)
4080  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4081  zero_distortion= dct_coeff*dct_coeff;
4082 
4083  for(level_index=0; level_index < coeff_count[i]; level_index++){
4084  int distortion;
4085  int level= coeff[level_index][i];
4086  const int alevel= FFABS(level);
4087  int unquant_coeff;
4088 
4089  av_assert2(level);
4090 
4091  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4092  unquant_coeff= alevel*qmul + qadd;
4093  } else if(s->out_format == FMT_MJPEG) {
4094  j = s->idsp.idct_permutation[scantable[i]];
4095  unquant_coeff = alevel * matrix[j] * 8;
4096  }else{ // MPEG-1
4097  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4098  if(s->mb_intra){
4099  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4100  unquant_coeff = (unquant_coeff - 1) | 1;
4101  }else{
4102  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4103  unquant_coeff = (unquant_coeff - 1) | 1;
4104  }
4105  unquant_coeff<<= 3;
4106  }
4107 
4108  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4109  level+=64;
4110  if((level&(~127)) == 0){
4111  for(j=survivor_count-1; j>=0; j--){
4112  int run= i - survivor[j];
4113  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4114  score += score_tab[i-run];
4115 
4116  if(score < best_score){
4117  best_score= score;
4118  run_tab[i+1]= run;
4119  level_tab[i+1]= level-64;
4120  }
4121  }
4122 
4123  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4124  for(j=survivor_count-1; j>=0; j--){
4125  int run= i - survivor[j];
4126  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4127  score += score_tab[i-run];
4128  if(score < last_score){
4129  last_score= score;
4130  last_run= run;
4131  last_level= level-64;
4132  last_i= i+1;
4133  }
4134  }
4135  }
4136  }else{
4137  distortion += esc_length*lambda;
4138  for(j=survivor_count-1; j>=0; j--){
4139  int run= i - survivor[j];
4140  int score= distortion + score_tab[i-run];
4141 
4142  if(score < best_score){
4143  best_score= score;
4144  run_tab[i+1]= run;
4145  level_tab[i+1]= level-64;
4146  }
4147  }
4148 
4149  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4150  for(j=survivor_count-1; j>=0; j--){
4151  int run= i - survivor[j];
4152  int score= distortion + score_tab[i-run];
4153  if(score < last_score){
4154  last_score= score;
4155  last_run= run;
4156  last_level= level-64;
4157  last_i= i+1;
4158  }
4159  }
4160  }
4161  }
4162  }
4163 
4164  score_tab[i+1]= best_score;
4165 
4166  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
4167  if(last_non_zero <= 27){
4168  for(; survivor_count; survivor_count--){
4169  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4170  break;
4171  }
4172  }else{
4173  for(; survivor_count; survivor_count--){
4174  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4175  break;
4176  }
4177  }
4178 
4179  survivor[ survivor_count++ ]= i+1;
4180  }
4181 
4182  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4183  last_score= 256*256*256*120;
4184  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4185  int score= score_tab[i];
4186  if (i)
4187  score += lambda * 2; // FIXME more exact?
4188 
4189</