/* FFmpeg — libavcodec/mpegvideo_enc.c (doxygen-extracted source listing) */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem_internal.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/thread.h"
45 #include "avcodec.h"
46 #include "encode.h"
47 #include "idctdsp.h"
48 #include "mpeg12codecs.h"
49 #include "mpeg12data.h"
50 #include "mpeg12enc.h"
51 #include "mpegvideo.h"
52 #include "mpegvideodata.h"
53 #include "mpegvideoenc.h"
54 #include "h261enc.h"
55 #include "h263.h"
56 #include "h263data.h"
57 #include "h263enc.h"
58 #include "mjpegenc_common.h"
59 #include "mathops.h"
60 #include "mpegutils.h"
61 #include "mjpegenc.h"
62 #include "speedhqenc.h"
63 #include "msmpeg4enc.h"
64 #include "pixblockdsp.h"
65 #include "qpeldsp.h"
66 #include "faandct.h"
67 #include "aandcttab.h"
68 #include "flvenc.h"
69 #include "mpeg4video.h"
70 #include "mpeg4videodata.h"
71 #include "mpeg4videoenc.h"
72 #include "internal.h"
73 #include "bytestream.h"
74 #include "wmv2enc.h"
75 #include "rv10enc.h"
76 #include "packet_internal.h"
77 #include <limits.h>
78 #include "sp5x.h"
79 
80 #define QUANT_BIAS_SHIFT 8
81 
82 #define QMAT_SHIFT_MMX 16
83 #define QMAT_SHIFT 21
84 
85 static int encode_picture(MpegEncContext *s);
86 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
87 static int sse_mb(MpegEncContext *s);
88 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
89 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
90 
91 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
92 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
93 
94 static const AVOption mpv_generic_options[] = {
97  { NULL },
98 };
99 
101  .class_name = "generic mpegvideo encoder",
102  .item_name = av_default_item_name,
103  .option = mpv_generic_options,
104  .version = LIBAVUTIL_VERSION_INT,
105 };
106 
107 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
108  uint16_t (*qmat16)[2][64],
109  const uint16_t *quant_matrix,
110  int bias, int qmin, int qmax, int intra)
111 {
112  FDCTDSPContext *fdsp = &s->fdsp;
113  int qscale;
114  int shift = 0;
115 
116  for (qscale = qmin; qscale <= qmax; qscale++) {
117  int i;
118  int qscale2;
119 
120  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
121  else qscale2 = qscale << 1;
122 
123  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
124 #if CONFIG_FAANDCT
125  fdsp->fdct == ff_faandct ||
126 #endif /* CONFIG_FAANDCT */
128  for (i = 0; i < 64; i++) {
129  const int j = s->idsp.idct_permutation[i];
130  int64_t den = (int64_t) qscale2 * quant_matrix[j];
131  /* 16 <= qscale * quant_matrix[i] <= 7905
132  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
133  * 19952 <= x <= 249205026
134  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
135  * 3444240 >= (1 << 36) / (x) >= 275 */
136 
137  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
138  }
139  } else if (fdsp->fdct == ff_fdct_ifast) {
140  for (i = 0; i < 64; i++) {
141  const int j = s->idsp.idct_permutation[i];
142  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
143  /* 16 <= qscale * quant_matrix[i] <= 7905
144  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
145  * 19952 <= x <= 249205026
146  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
147  * 3444240 >= (1 << 36) / (x) >= 275 */
148 
149  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
150  }
151  } else {
152  for (i = 0; i < 64; i++) {
153  const int j = s->idsp.idct_permutation[i];
154  int64_t den = (int64_t) qscale2 * quant_matrix[j];
155  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
156  * Assume x = qscale * quant_matrix[i]
157  * So 16 <= x <= 7905
158  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
159  * so 32768 >= (1 << 19) / (x) >= 67 */
160  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
161  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
162  // (qscale * quant_matrix[i]);
163  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
164 
165  if (qmat16[qscale][0][i] == 0 ||
166  qmat16[qscale][0][i] == 128 * 256)
167  qmat16[qscale][0][i] = 128 * 256 - 1;
168  qmat16[qscale][1][i] =
169  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
170  qmat16[qscale][0][i]);
171  }
172  }
173 
174  for (i = intra; i < 64; i++) {
175  int64_t max = 8191;
176  if (fdsp->fdct == ff_fdct_ifast) {
177  max = (8191LL * ff_aanscales[i]) >> 14;
178  }
179  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
180  shift++;
181  }
182  }
183  }
184  if (shift) {
185  av_log(s->avctx, AV_LOG_INFO,
186  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
187  QMAT_SHIFT - shift);
188  }
189 }
190 
191 static inline void update_qscale(MpegEncContext *s)
192 {
193  if (s->q_scale_type == 1 && 0) {
194  int i;
195  int bestdiff=INT_MAX;
196  int best = 1;
197 
198  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
199  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
200  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
201  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
202  continue;
203  if (diff < bestdiff) {
204  bestdiff = diff;
205  best = i;
206  }
207  }
208  s->qscale = best;
209  } else {
210  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
211  (FF_LAMBDA_SHIFT + 7);
212  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
213  }
214 
215  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
217 }
218 
220 {
221  int i;
222 
223  if (matrix) {
224  put_bits(pb, 1, 1);
225  for (i = 0; i < 64; i++) {
227  }
228  } else
229  put_bits(pb, 1, 0);
230 }
231 
232 /**
233  * init s->current_picture.qscale_table from s->lambda_table
234  */
236 {
237  int8_t * const qscale_table = s->current_picture.qscale_table;
238  int i;
239 
240  for (i = 0; i < s->mb_num; i++) {
241  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
242  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
243  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
244  s->avctx->qmax);
245  }
246 }
247 
249  const MpegEncContext *src)
250 {
251 #define COPY(a) dst->a= src->a
252  COPY(pict_type);
254  COPY(f_code);
255  COPY(b_code);
256  COPY(qscale);
257  COPY(lambda);
258  COPY(lambda2);
259  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
260  COPY(progressive_frame); // FIXME don't set in encode_header
261  COPY(partitioned_frame); // FIXME don't set in encode_header
262 #undef COPY
263 }
264 
265 static void mpv_encode_init_static(void)
266 {
267  for (int i = -16; i < 16; i++)
268  default_fcode_tab[i + MAX_MV] = 1;
269 }
270 
271 /**
272  * Set the given MpegEncContext to defaults for encoding.
273  * the changed fields will not depend upon the prior state of the MpegEncContext.
274  */
276 {
277  static AVOnce init_static_once = AV_ONCE_INIT;
278 
280 
281  ff_thread_once(&init_static_once, mpv_encode_init_static);
282 
283  s->me.mv_penalty = default_mv_penalty;
284  s->fcode_tab = default_fcode_tab;
285 
286  s->input_picture_number = 0;
287  s->picture_in_gop_number = 0;
288 }
289 
291 {
292 #if ARCH_X86
294 #endif
295 
296  if (CONFIG_H263_ENCODER)
297  ff_h263dsp_init(&s->h263dsp);
298  if (!s->dct_quantize)
299  s->dct_quantize = ff_dct_quantize_c;
300  if (!s->denoise_dct)
301  s->denoise_dct = denoise_dct_c;
302  s->fast_dct_quantize = s->dct_quantize;
303  if (s->avctx->trellis)
304  s->dct_quantize = dct_quantize_trellis_c;
305 
306  return 0;
307 }
308 
309 /* init video encoder */
311 {
313  AVCPBProperties *cpb_props;
314  int i, ret;
315  int mb_array_size, mv_table_size;
316 
318 
319  switch (avctx->pix_fmt) {
320  case AV_PIX_FMT_YUVJ444P:
321  case AV_PIX_FMT_YUV444P:
322  s->chroma_format = CHROMA_444;
323  break;
324  case AV_PIX_FMT_YUVJ422P:
325  case AV_PIX_FMT_YUV422P:
326  s->chroma_format = CHROMA_422;
327  break;
328  case AV_PIX_FMT_YUVJ420P:
329  case AV_PIX_FMT_YUV420P:
330  default:
331  s->chroma_format = CHROMA_420;
332  break;
333  }
334 
336 
337  s->bit_rate = avctx->bit_rate;
338  s->width = avctx->width;
339  s->height = avctx->height;
340  if (avctx->gop_size > 600 &&
343  "keyframe interval too large!, reducing it from %d to %d\n",
344  avctx->gop_size, 600);
345  avctx->gop_size = 600;
346  }
347  s->gop_size = avctx->gop_size;
348  s->avctx = avctx;
350  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
351  "is %d.\n", MAX_B_FRAMES);
353  } else if (avctx->max_b_frames < 0) {
355  "max b frames must be 0 or positive for mpegvideo based encoders\n");
356  return AVERROR(EINVAL);
357  }
358  s->max_b_frames = avctx->max_b_frames;
359  s->codec_id = avctx->codec->id;
360  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
361  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
362  return AVERROR(EINVAL);
363  }
364 
365  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
366  s->rtp_mode = !!s->rtp_payload_size;
367  s->intra_dc_precision = avctx->intra_dc_precision;
368 
369  // workaround some differences between how applications specify dc precision
370  if (s->intra_dc_precision < 0) {
371  s->intra_dc_precision += 8;
372  } else if (s->intra_dc_precision >= 8)
373  s->intra_dc_precision -= 8;
374 
375  if (s->intra_dc_precision < 0) {
377  "intra dc precision must be positive, note some applications use"
378  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
379  return AVERROR(EINVAL);
380  }
381 
382  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
383  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
384  return AVERROR(EINVAL);
385  }
386  s->user_specified_pts = AV_NOPTS_VALUE;
387 
388  if (s->gop_size <= 1) {
389  s->intra_only = 1;
390  s->gop_size = 12;
391  } else {
392  s->intra_only = 0;
393  }
394 
395  /* Fixed QSCALE */
396  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
397 
398  s->adaptive_quant = (avctx->lumi_masking ||
399  avctx->dark_masking ||
402  avctx->p_masking ||
403  s->border_masking ||
404  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
405  !s->fixed_qscale;
406 
407  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
408 
410  switch(avctx->codec_id) {
413  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
414  break;
415  case AV_CODEC_ID_MPEG4:
419  if (avctx->rc_max_rate >= 15000000) {
420  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
421  } else if(avctx->rc_max_rate >= 2000000) {
422  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
423  } else if(avctx->rc_max_rate >= 384000) {
424  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
425  } else
426  avctx->rc_buffer_size = 40;
427  avctx->rc_buffer_size *= 16384;
428  break;
429  }
430  if (avctx->rc_buffer_size) {
431  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
432  }
433  }
434 
435  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
436  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
437  return AVERROR(EINVAL);
438  }
439 
442  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
443  }
444 
446  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
447  return AVERROR(EINVAL);
448  }
449 
451  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
452  return AVERROR(EINVAL);
453  }
454 
455  if (avctx->rc_max_rate &&
459  "impossible bitrate constraints, this will fail\n");
460  }
461 
462  if (avctx->rc_buffer_size &&
463  avctx->bit_rate * (int64_t)avctx->time_base.num >
464  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
465  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
466  return AVERROR(EINVAL);
467  }
468 
469  if (!s->fixed_qscale &&
472  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
474  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
475  if (nbt <= INT_MAX) {
476  avctx->bit_rate_tolerance = nbt;
477  } else
478  avctx->bit_rate_tolerance = INT_MAX;
479  }
480 
481  if (avctx->rc_max_rate &&
483  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
484  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
485  90000LL * (avctx->rc_buffer_size - 1) >
486  avctx->rc_max_rate * 0xFFFFLL) {
488  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
489  "specified vbv buffer is too large for the given bitrate!\n");
490  }
491 
492  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
493  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
494  s->codec_id != AV_CODEC_ID_FLV1) {
495  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
496  return AVERROR(EINVAL);
497  }
498 
499  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
501  "OBMC is only supported with simple mb decision\n");
502  return AVERROR(EINVAL);
503  }
504 
505  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
506  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
507  return AVERROR(EINVAL);
508  }
509 
510  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511  s->codec_id == AV_CODEC_ID_H263 ||
512  s->codec_id == AV_CODEC_ID_H263P) &&
513  (avctx->sample_aspect_ratio.num > 255 ||
514  avctx->sample_aspect_ratio.den > 255)) {
516  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
520  }
521 
522  if ((s->codec_id == AV_CODEC_ID_H263 ||
523  s->codec_id == AV_CODEC_ID_H263P) &&
524  (avctx->width > 2048 ||
525  avctx->height > 1152 )) {
526  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527  return AVERROR(EINVAL);
528  }
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P ||
531  s->codec_id == AV_CODEC_ID_RV20) &&
532  ((avctx->width &3) ||
533  (avctx->height&3) )) {
534  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
535  return AVERROR(EINVAL);
536  }
537 
538  if (s->codec_id == AV_CODEC_ID_RV10 &&
539  (avctx->width &15 ||
540  avctx->height&15 )) {
541  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
546  s->codec_id == AV_CODEC_ID_WMV2) &&
547  avctx->width & 1) {
548  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
549  return AVERROR(EINVAL);
550  }
551 
553  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557 
558  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
559  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
565  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
566  return AVERROR(EINVAL);
567  }
568 
569  if (s->scenechange_threshold < 1000000000 &&
572  "closed gop with scene change detection are not supported yet, "
573  "set threshold to 1000000000\n");
574  return AVERROR_PATCHWELCOME;
575  }
576 
578  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
581  "low delay forcing is only available for mpeg2, "
582  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
583  return AVERROR(EINVAL);
584  }
585  if (s->max_b_frames != 0) {
587  "B-frames cannot be used with low delay\n");
588  return AVERROR(EINVAL);
589  }
590  }
591 
592  if (s->q_scale_type == 1) {
593  if (avctx->qmax > 28) {
595  "non linear quant only supports qmax <= 28 currently\n");
596  return AVERROR_PATCHWELCOME;
597  }
598  }
599 
600  if (avctx->slices > 1 &&
602  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
603  return AVERROR(EINVAL);
604  }
605 
606  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
608  "notice: b_frame_strategy only affects the first pass\n");
609  s->b_frame_strategy = 0;
610  }
611 
613  if (i > 1) {
614  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
615  avctx->time_base.den /= i;
616  avctx->time_base.num /= i;
617  //return -1;
618  }
619 
620  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
621  // (a + x * 3 / 8) / x
622  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
623  s->inter_quant_bias = 0;
624  } else {
625  s->intra_quant_bias = 0;
626  // (a - x / 4) / x
627  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
628  }
629 
630  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
631  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
632  return AVERROR(EINVAL);
633  }
634 
635  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
636 
637  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
638  avctx->time_base.den > (1 << 16) - 1) {
640  "timebase %d/%d not supported by MPEG 4 standard, "
641  "the maximum admitted value for the timebase denominator "
642  "is %d\n", avctx->time_base.num, avctx->time_base.den,
643  (1 << 16) - 1);
644  return AVERROR(EINVAL);
645  }
646  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
647 
648  switch (avctx->codec->id) {
649 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
651  s->rtp_mode = 1;
652  /* fallthrough */
654  s->out_format = FMT_MPEG1;
655  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
656  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
658  break;
659 #endif
660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
661  case AV_CODEC_ID_MJPEG:
662  case AV_CODEC_ID_AMV:
663  s->out_format = FMT_MJPEG;
664  s->intra_only = 1; /* force intra only for jpeg */
665  if ((ret = ff_mjpeg_encode_init(s)) < 0)
666  return ret;
667  avctx->delay = 0;
668  s->low_delay = 1;
669  break;
670 #endif
671  case AV_CODEC_ID_SPEEDHQ:
672  s->out_format = FMT_SPEEDHQ;
673  s->intra_only = 1; /* force intra only for SHQ */
674  if (!CONFIG_SPEEDHQ_ENCODER)
676  if ((ret = ff_speedhq_encode_init(s)) < 0)
677  return ret;
678  avctx->delay = 0;
679  s->low_delay = 1;
680  break;
681  case AV_CODEC_ID_H261:
682  if (!CONFIG_H261_ENCODER)
685  if (ret < 0)
686  return ret;
687  s->out_format = FMT_H261;
688  avctx->delay = 0;
689  s->low_delay = 1;
690  s->rtp_mode = 0; /* Sliced encoding not supported */
691  break;
692  case AV_CODEC_ID_H263:
693  if (!CONFIG_H263_ENCODER)
696  s->width, s->height) == 8) {
698  "The specified picture size of %dx%d is not valid for "
699  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
700  "352x288, 704x576, and 1408x1152. "
701  "Try H.263+.\n", s->width, s->height);
702  return AVERROR(EINVAL);
703  }
704  s->out_format = FMT_H263;
705  avctx->delay = 0;
706  s->low_delay = 1;
707  break;
708  case AV_CODEC_ID_H263P:
709  s->out_format = FMT_H263;
710  s->h263_plus = 1;
711  /* Fx */
712  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
713  s->modified_quant = s->h263_aic;
714  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
715  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
716 
717  /* /Fx */
718  /* These are just to be sure */
719  avctx->delay = 0;
720  s->low_delay = 1;
721  break;
722  case AV_CODEC_ID_FLV1:
723  s->out_format = FMT_H263;
724  s->h263_flv = 2; /* format = 1; 11-bit codes */
725  s->unrestricted_mv = 1;
726  s->rtp_mode = 0; /* don't allow GOB */
727  avctx->delay = 0;
728  s->low_delay = 1;
729  break;
730  case AV_CODEC_ID_RV10:
731  s->out_format = FMT_H263;
732  avctx->delay = 0;
733  s->low_delay = 1;
734  break;
735  case AV_CODEC_ID_RV20:
736  s->out_format = FMT_H263;
737  avctx->delay = 0;
738  s->low_delay = 1;
739  s->modified_quant = 1;
740  s->h263_aic = 1;
741  s->h263_plus = 1;
742  s->loop_filter = 1;
743  s->unrestricted_mv = 0;
744  break;
745  case AV_CODEC_ID_MPEG4:
746  s->out_format = FMT_H263;
747  s->h263_pred = 1;
748  s->unrestricted_mv = 1;
749  s->low_delay = s->max_b_frames ? 0 : 1;
750  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
751  break;
753  s->out_format = FMT_H263;
754  s->h263_pred = 1;
755  s->unrestricted_mv = 1;
756  s->msmpeg4_version = 2;
757  avctx->delay = 0;
758  s->low_delay = 1;
759  break;
761  s->out_format = FMT_H263;
762  s->h263_pred = 1;
763  s->unrestricted_mv = 1;
764  s->msmpeg4_version = 3;
765  s->flipflop_rounding = 1;
766  avctx->delay = 0;
767  s->low_delay = 1;
768  break;
769  case AV_CODEC_ID_WMV1:
770  s->out_format = FMT_H263;
771  s->h263_pred = 1;
772  s->unrestricted_mv = 1;
773  s->msmpeg4_version = 4;
774  s->flipflop_rounding = 1;
775  avctx->delay = 0;
776  s->low_delay = 1;
777  break;
778  case AV_CODEC_ID_WMV2:
779  s->out_format = FMT_H263;
780  s->h263_pred = 1;
781  s->unrestricted_mv = 1;
782  s->msmpeg4_version = 5;
783  s->flipflop_rounding = 1;
784  avctx->delay = 0;
785  s->low_delay = 1;
786  break;
787  default:
788  return AVERROR(EINVAL);
789  }
790 
791  avctx->has_b_frames = !s->low_delay;
792 
793  s->encoding = 1;
794 
795  s->progressive_frame =
796  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
798  s->alternate_scan);
799 
800  if (s->lmin > s->lmax) {
801  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
802  s->lmin = s->lmax;
803  }
804 
805  /* init */
807  if ((ret = ff_mpv_common_init(s)) < 0)
808  return ret;
809 
810  ff_fdctdsp_init(&s->fdsp, avctx);
811  ff_me_cmp_init(&s->mecc, avctx);
812  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
813  ff_pixblockdsp_init(&s->pdsp, avctx);
814 
815  if (!(avctx->stats_out = av_mallocz(256)) ||
816  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
817  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
818  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
819  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
820  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
821  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
822  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
823  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
824  !(s->new_picture = av_frame_alloc()))
825  return AVERROR(ENOMEM);
826 
827  /* Allocate MV tables; the MV and MB tables will be copied
828  * to slice contexts by ff_update_duplicate_context(). */
829  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
830  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
831  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
832  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
833  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
834  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
835  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
836  return AVERROR(ENOMEM);
837  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
838  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
839  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
840  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
841  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
842  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
843 
844  /* Allocate MB type table */
845  mb_array_size = s->mb_stride * s->mb_height;
846  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
847  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
848  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
849  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
850  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
851  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
852  !(s->mb_mean = av_mallocz(mb_array_size)))
853  return AVERROR(ENOMEM);
854 
855 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
856  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
857  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
858  int16_t (*tmp1)[2];
859  uint8_t *tmp2;
860  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
861  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
862  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
863  return AVERROR(ENOMEM);
864 
865  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
866  tmp1 += s->mb_stride + 1;
867 
868  for (int i = 0; i < 2; i++) {
869  for (int j = 0; j < 2; j++) {
870  for (int k = 0; k < 2; k++) {
871  s->b_field_mv_table[i][j][k] = tmp1;
872  tmp1 += mv_table_size;
873  }
874  s->b_field_select_table[i][j] = tmp2;
875  tmp2 += 2 * mv_table_size;
876  }
877  }
878  }
879 
880  if (s->noise_reduction) {
881  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
882  return AVERROR(ENOMEM);
883  }
884 
886 
887  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
888  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
889  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
890  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
891  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
892  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
893  } else {
894  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
895  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
896  }
897 
898  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
899  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
900 
901  if (s->slice_context_count > 1) {
902  s->rtp_mode = 1;
903 
905  s->h263_slice_structured = 1;
906  }
907 
908  s->quant_precision = 5;
909 
910  ret = ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
911  ret |= ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
912  if (ret < 0)
913  return AVERROR(EINVAL);
914 
915  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
917  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
919  }
920 
921  /* init q matrix */
922  for (i = 0; i < 64; i++) {
923  int j = s->idsp.idct_permutation[i];
924  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
925  s->mpeg_quant) {
926  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
927  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
928  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
929  s->intra_matrix[j] =
930  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
931  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
932  s->intra_matrix[j] =
933  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
934  } else {
935  /* MPEG-1/2 */
936  s->chroma_intra_matrix[j] =
937  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
938  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
939  }
940  if (avctx->intra_matrix)
941  s->intra_matrix[j] = avctx->intra_matrix[i];
942  if (avctx->inter_matrix)
943  s->inter_matrix[j] = avctx->inter_matrix[i];
944  }
945 
946  /* precompute matrix */
947  /* for mjpeg, we do include qscale in the matrix */
948  if (s->out_format != FMT_MJPEG) {
949  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
950  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
951  31, 1);
952  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
953  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
954  31, 0);
955  }
956 
957  if ((ret = ff_rate_control_init(s)) < 0)
958  return ret;
959 
960  if (s->b_frame_strategy == 2) {
961  for (i = 0; i < s->max_b_frames + 2; i++) {
962  s->tmp_frames[i] = av_frame_alloc();
963  if (!s->tmp_frames[i])
964  return AVERROR(ENOMEM);
965 
966  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
967  s->tmp_frames[i]->width = s->width >> s->brd_scale;
968  s->tmp_frames[i]->height = s->height >> s->brd_scale;
969 
970  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
971  if (ret < 0)
972  return ret;
973  }
974  }
975 
976  cpb_props = ff_encode_add_cpb_side_data(avctx);
977  if (!cpb_props)
978  return AVERROR(ENOMEM);
979  cpb_props->max_bitrate = avctx->rc_max_rate;
980  cpb_props->min_bitrate = avctx->rc_min_rate;
981  cpb_props->avg_bitrate = avctx->bit_rate;
982  cpb_props->buffer_size = avctx->rc_buffer_size;
983 
984  return 0;
985 }
986 
988 {
990  int i;
991 
993 
995 
996  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
997  av_frame_free(&s->tmp_frames[i]);
998 
999  av_frame_free(&s->new_picture);
1000 
1002 
1003  av_freep(&s->p_mv_table_base);
1004  av_freep(&s->b_forw_mv_table_base);
1005  av_freep(&s->b_back_mv_table_base);
1006  av_freep(&s->b_bidir_forw_mv_table_base);
1007  av_freep(&s->b_bidir_back_mv_table_base);
1008  av_freep(&s->b_direct_mv_table_base);
1009  av_freep(&s->b_field_mv_table_base);
1010  av_freep(&s->b_field_select_table[0][0]);
1011  av_freep(&s->p_field_select_table[0]);
1012 
1013  av_freep(&s->mb_type);
1014  av_freep(&s->lambda_table);
1015 
1016  av_freep(&s->cplx_tab);
1017  av_freep(&s->bits_tab);
1018 
1019  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1020  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1021  s->q_chroma_intra_matrix= NULL;
1022  s->q_chroma_intra_matrix16= NULL;
1023  av_freep(&s->q_intra_matrix);
1024  av_freep(&s->q_inter_matrix);
1025  av_freep(&s->q_intra_matrix16);
1026  av_freep(&s->q_inter_matrix16);
1027  av_freep(&s->input_picture);
1028  av_freep(&s->reordered_input_picture);
1029  av_freep(&s->dct_offset);
1030  av_freep(&s->mb_var);
1031  av_freep(&s->mc_mb_var);
1032  av_freep(&s->mb_mean);
1033 
1034  return 0;
1035 }
1036 
1037 #define IS_ENCODER 1
1039 
/* Encoder-side wrapper around macroblock reconstruction: when
 * FF_DEBUG_DCT_COEFF is enabled, dumps the DCT coefficients of the
 * current macroblock before reconstructing it.
 * NOTE(review): the actual reconstruction call (listing line 1054) is
 * elided from this excerpt — confirm against the full source. */
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            /* blocks are stored permuted; index through idct_permutation
             * so the coefficients print in natural scan order */
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

}
1056 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value.
 *
 * @param src    top-left sample of the 16x16 block
 * @param ref    constant value (typically the block mean) to compare with
 * @param stride distance in bytes between vertically adjacent samples
 * @return the accumulated absolute difference over all 256 samples
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1070 
1071 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1072  const uint8_t *ref, int stride)
1073 {
1074  int x, y, w, h;
1075  int acc = 0;
1076 
1077  w = s->width & ~15;
1078  h = s->height & ~15;
1079 
1080  for (y = 0; y < h; y += 16) {
1081  for (x = 0; x < w; x += 16) {
1082  int offset = x + y * stride;
1083  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1084  stride, 16);
1085  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1086  int sae = get_sae(src + offset, mean, stride);
1087 
1088  acc += sae + 500 < sad;
1089  }
1090  }
1091  return acc;
1092 }
1093 
/* Allocate the frame buffer for an internal (non-shared) input picture.
 * The frame is allocated oversized by EDGE_WIDTH on every side, the data
 * pointers are then advanced past the top-left border (so code may read
 * outside the visible area), and the visible dimensions are restored.
 * NOTE(review): the signature line (listing 1094) is elided; presumably
 * static int alloc_picture(MpegEncContext *s, Picture *pic) — confirm. */
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    /* temporarily enlarge so the allocation includes the borders */
    pic->f->width = avctx->width + 2 * EDGE_WIDTH;
    pic->f->height = avctx->height + 2 * EDGE_WIDTH;

    ret = ff_encode_alloc_frame(avctx, pic->f);
    if (ret < 0)
        return ret;

    for (int i = 0; pic->f->data[i]; i++) {
        /* skip the border; chroma planes use the subsampled edge widths */
        int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                     pic->f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
        pic->f->data[i] += offset;
    }
    /* restore the visible dimensions */
    pic->f->width = avctx->width;
    pic->f->height = avctx->height;

    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 1, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
1119 
/**
 * Queue one user-supplied frame for encoding.
 *
 * The frame is either referenced directly ("direct" mode, when its strides
 * and alignment match the encoder's internal layout) or copied into a
 * freshly allocated internal picture with padded edges.  PTS values are
 * validated or guessed, and s->input_picture[] is shifted so it stays
 * densely packed.
 *
 * @param pic_arg next frame to encode, or NULL to signal flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly monotonic */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or
             * fall back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* zero-copy ("direct") use requires matching strides,
         * macroblock-aligned dimensions and aligned data pointers */
        if (pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
            pic->shared = 1;
        } else {
            ret = alloc_picture(s, pic);
            if (ret < 0)
                return ret;
            ret = av_frame_copy_props(pic->f, pic_arg);
            if (ret < 0) {
                ff_mpeg_unref_picture(pic);
                return ret;
            }

            /* copy all three planes into the internal picture */
            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                int h_shift = i ? s->chroma_x_shift : 0;
                int v_shift = i ? s->chroma_y_shift : 0;
                int w = s->width >> h_shift;
                int h = s->height >> v_shift;
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 with a height far from a multiple of
                 * 32 needs deeper bottom padding */
                if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->progressive_sequence
                    && FFALIGN(s->height, 32) - s->height > 16)
                    vpad = 32;

                if (!s->avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* replicate the last rows/columns into the padding when
                 * the dimensions are not block aligned */
                if ((s->width & 15) || (s->height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = pic;

    return 0;
}
1260 
1261 static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
1262 {
1263  int x, y, plane;
1264  int score = 0;
1265  int64_t score64 = 0;
1266 
1267  for (plane = 0; plane < 3; plane++) {
1268  const int stride = p->f->linesize[plane];
1269  const int bw = plane ? 1 : 2;
1270  for (y = 0; y < s->mb_height * bw; y++) {
1271  for (x = 0; x < s->mb_width * bw; x++) {
1272  int off = p->shared ? 0 : 16;
1273  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1274  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1275  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1276 
1277  switch (FFABS(s->frame_skip_exp)) {
1278  case 0: score = FFMAX(score, v); break;
1279  case 1: score += FFABS(v); break;
1280  case 2: score64 += v * (int64_t)v; break;
1281  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1282  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1283  }
1284  }
1285  }
1286  }
1287  emms_c();
1288 
1289  if (score)
1290  score64 = score;
1291  if (s->frame_skip_exp < 0)
1292  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1293  -1.0/s->frame_skip_exp);
1294 
1295  if (score64 < s->frame_skip_threshold)
1296  return 1;
1297  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1298  return 1;
1299  return 0;
1300 }
1301 
/* Feed one frame to a scratch encoder context and drain every packet it
 * produces, returning the total byte count (used by the B-frame count
 * estimation below).
 * NOTE(review): the signature (listing line 1302) and the actual
 * send/receive calls (lines 1307, 1312, 1315 — presumably
 * avcodec_send_frame(), avcodec_receive_packet() and av_packet_unref())
 * are elided from this listing — confirm against the full source. */
{
    int ret;
    int size = 0;

    if (ret < 0)
        return ret;

    do {
        if (ret >= 0) {
            size += pkt->size;
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    return size;
}
1322 
/* b_frame_strategy 2: encode the queued pictures at reduced resolution
 * with every possible B-frame count (0..max_b_frames) and return the
 * count with the lowest rate-distortion cost.
 * NOTE(review): the signature line (listing 1323) is elided; presumably
 * static int estimate_best_b_count(MpegEncContext *s) — confirm. */
{
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
    /* NOTE(review): the shift amount (listing line 1348, presumably
     * FF_LAMBDA_SHIFT;) is elided from this listing */

    /* downscale the last reference plus the candidate inputs into
     * tmp_frames[] */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
                                           s->next_picture_ptr;

        if (pre_input_ptr) {
            const uint8_t *data[4];
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input_ptr->shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input_ptr->f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input_ptr->f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input_ptr->f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* try every candidate B-frame count j and measure its RD cost */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* NOTE(review): the scratch-context allocation (listing line 1389,
         * presumably avcodec_alloc_context3()) is elided */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width = width;
        c->height = height;
        /* NOTE(review): initial c->flags assignment (line 1397) is elided */
        c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision = s->avctx->mb_decision;
        c->me_cmp = s->avctx->me_cmp;
        c->mb_cmp = s->avctx->mb_cmp;
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, s->avctx->codec, NULL);
        if (ret < 0)
            goto fail;


        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            s->tmp_frames[i + 1]->pict_type = is_p ?
            /* NOTE(review): the ternary operands (listing line 1427,
             * presumably AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;) are
             * elided from this listing */
            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* NOTE(review): the flush call (listing line 1440, presumably
         * out_size = encode_frame(c, NULL, pkt);) is elided */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        /* NOTE(review): scratch-context teardown (listing lines 1455-1456)
         * is elided from this listing */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1467 
/* Choose the next picture to code and reorder the input queue: decides
 * I/P/B types (honouring two-pass stats, frame-skip detection and the
 * configured b_frame_strategy) and sets up s->new_picture and
 * s->current_picture_ptr.
 * NOTE(review): the signature line (listing 1468) is elided; presumably
 * static int select_input_picture(MpegEncContext *s) — confirm. */
{
    int i, ret;

    for (int i = 1; i <= MAX_B_FRAMES; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_B_FRAMES] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (s->frame_skip_threshold || s->frame_skip_factor) {
            if (s->picture_in_gop_number < s->gop_size &&
                s->next_picture_ptr &&
                skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                // FIXME check that the gop check above is +-1 correct
                av_frame_unref(s->input_picture[0]->f);

                ff_vbv_update(s, 0);

                goto no_output_pic;
            }
        }

        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            /* no reference available (or intra-only): force an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames = 0;

            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                /* second pass: take the picture types from the stats file */
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->b_frame_strategy == 0) {
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->b_frame_strategy == 1) {
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            /* NOTE(review): the call head (listing line 1525,
                             * presumably get_intra_count(s,) is elided */
                            s->input_picture[i ]->f->data[0],
                            s->input_picture[i - 1]->f->data[0],
                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->b_frame_strategy == 2) {
                b_frames = estimate_best_b_count(s);
                if (b_frames < 0) {
                    ff_mpeg_unref_picture(s->input_picture[0]);
                    return b_frames;
                }
            }

            emms_c();

            /* a forced non-B type inside the run shortens it */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many B-frames in a row\n");
            }

            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                    /* NOTE(review): the assigned value (listing line 1588,
                     * presumably AV_PICTURE_TYPE_B;) is elided */
                s->reordered_input_picture[i + 1]->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    av_frame_unref(s->new_picture);

    if (s->reordered_input_picture[0]) {
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = av_frame_ref(s->new_picture,
                                s->reordered_input_picture[0]->f)))
            goto fail;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            ret = alloc_picture(s, pic);
            if (ret < 0)
                goto fail;

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0) {
                ff_mpeg_unref_picture(pic);
                goto fail;
            }
            pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
            pic->display_picture_number = s->reordered_input_picture[0]->display_picture_number;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                if (s->new_picture->data[i])
                    s->new_picture->data[i] += INPLACE_OFFSET;
            }
        }
        s->picture_number = s->current_picture_ptr->display_picture_number;

    }
    return 0;
fail:
    ff_mpeg_unref_picture(s->reordered_input_picture[0]);
    return ret;
}
1650 
/* Per-frame teardown: pads the edges of reference pictures (needed for
 * unrestricted motion vectors) and remembers per-type lambda values for
 * future rate-control decisions.
 * NOTE(review): the signature line (listing 1651) is elided; presumably
 * static void frame_end(MpegEncContext *s) — confirm. */
{
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        !s->intra_only) {
        int hshift = s->chroma_x_shift;
        int vshift = s->chroma_y_shift;
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
                                s->current_picture.f->linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): luma edge-width arguments
                                 * (listing line 1661, presumably
                                 * EDGE_WIDTH, EDGE_WIDTH,) are elided */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
                                s->current_picture.f->linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
                                s->current_picture.f->linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1686 
/* Refresh the per-coefficient DCT rounding offsets used for noise
 * reduction from the accumulated error statistics, halving the counters
 * once they grow large so the statistics stay adaptive.
 * NOTE(review): the signature line (listing 1687) is elided; presumably
 * static void update_noise_reduction(MpegEncContext *s) — confirm. */
{
    int intra, i;

    /* intra == 0: inter blocks, intra == 1: intra blocks */
    for (intra = 0; intra < 2; intra++) {
        /* decay the statistics to keep them responsive */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1707 
/* Per-frame setup: rotates the last/next/current reference pictures and
 * re-references them for this frame.
 * NOTE(review): the signature line (listing 1708) is elided; presumably
 * static int frame_start(MpegEncContext *s) — confirm. */
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;

    ff_mpeg_unref_picture(&s->current_picture);
    if ((ret = ff_mpeg_ref_picture(&s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* B-frames do not become references, so the ref chain only advances
     * on I/P frames */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(&s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(&s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(&s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(&s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): the update_noise_reduction() call (listing line
         * 1748) is elided from this listing */
    }

    return 0;
}
1753 
/* Main MPEG-video encode entry point: queues the input frame, selects the
 * picture to code, encodes it (re-encoding at higher lambda on VBV
 * overflow), applies stuffing, updates MPEG-1/2 vbv_delay for CBR and
 * fills in the output packet metadata.
 * NOTE(review): the first signature line (listing 1754, presumably
 * int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,) and the
 * local context setup (line 1757) are elided from this listing. */
                         const AVFrame *pic_arg, int *got_packet)
{
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        size_t pkt_size = 10000 + s->mb_width * s->mb_height *
                          (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_picture, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        if (s->mb_info) {
            /* NOTE(review): the side-data type argument (listing line 1787)
             * is elided from this listing */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* split the output buffer proportionally between slice threads */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        ret = encode_picture(s);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): the pkt->size update (listing line 1812) is
             * elided from this listing */
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            /* VBV overflow: raise lambda and re-encode the frame */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0; // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

            /* NOTE(review): an assertion (listing line 1861) is elided */
        }

        /* NOTE(review): first-pass stats writing (listing lines 1864-1865)
         * is elided from this listing */

        for (i = 0; i < 4; i++) {
            avctx->error[i] += s->encoding_error[i];
        }
        ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
                                       s->encoding_error,
                                       /* NOTE(review): one argument (listing
                                        * line 1872) is elided */
                                       s->pict_type);

        /* NOTE(review): the guarding condition (listing line 1875) of this
         * assert is elided from this listing */
        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* NOTE(review): the MPEG-1/2 case labels (listing lines
             * 1891-1892) are elided from this listing */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            /* NOTE(review): one condition (listing line 1915) is elided */
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits = avctx->rc_max_rate *
                            /* NOTE(review): the multiplicand (listing line
                             * 1924) is elided from this listing */
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_pos - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;

            if (bits < 0)
                /* NOTE(review): the av_log call head (listing line 1931)
                 * is elided from this listing */
                   "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field into the written header */
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1] = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): the side-data attach call head (listing line
             * 1955) is elided from this listing */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits += s->frame_bits;

        pkt->pts = s->current_picture.f->pts;
        pkt->duration = s->current_picture.f->duration;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
        /* NOTE(review): the guarding condition (listing line 1976) is
         * elided from this listing */
            ret = ff_encode_reordered_opaque(avctx, pkt, s->current_picture.f);
            if (ret < 0)
                return ret;
        }

        if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
            /* NOTE(review): the statement body (listing line 1983) is
             * elided from this listing */
        if (s->mb_info)
            /* NOTE(review): the statement body (listing line 1985) is
             * elided from this listing */
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(&s->picture[i]);
    }

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2002 
/* Zero out a block whose non-zero coefficients are few, small and cheap
 * to drop: when every coefficient is 0 or +-1 and a run-length-based
 * score stays below the threshold, the whole block (except possibly the
 * intra DC) is eliminated.  A negative threshold also allows eliminating
 * the DC coefficient.
 * NOTE(review): the first signature line (listing 2003) is elided;
 * presumably static void dct_single_coeff_elimination(MpegEncContext *s,
 * ...) — confirm against the full source. */
                                int n, int threshold)
{
    /* cost table indexed by the zero-run preceding a +-1 coefficient:
     * short runs early in the scan weigh more */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any coefficient above +-1 makes the block worth keeping */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap block: clear everything except (possibly) the DC coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2058 
2059 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2060  int last_index)
2061 {
2062  int i;
2063  const int maxlevel = s->max_qcoeff;
2064  const int minlevel = s->min_qcoeff;
2065  int overflow = 0;
2066 
2067  if (s->mb_intra) {
2068  i = 1; // skip clipping of intra dc
2069  } else
2070  i = 0;
2071 
2072  for (; i <= last_index; i++) {
2073  const int j = s->intra_scantable.permutated[i];
2074  int level = block[j];
2075 
2076  if (level > maxlevel) {
2077  level = maxlevel;
2078  overflow++;
2079  } else if (level < minlevel) {
2080  level = minlevel;
2081  overflow++;
2082  }
2083 
2084  block[j] = level;
2085  }
2086 
2087  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2088  av_log(s->avctx, AV_LOG_INFO,
2089  "warning, clipping %d dct coefficients to %d..%d\n",
2090  overflow, minlevel, maxlevel);
2091 }
2092 
2093 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2094 {
2095  int x, y;
2096  // FIXME optimize
2097  for (y = 0; y < 8; y++) {
2098  for (x = 0; x < 8; x++) {
2099  int x2, y2;
2100  int sum = 0;
2101  int sqr = 0;
2102  int count = 0;
2103 
2104  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2105  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2106  int v = ptr[x2 + y2 * stride];
2107  sum += v;
2108  sqr += v * v;
2109  count++;
2110  }
2111  }
2112  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2113  }
2114  }
2115 }
2116 
                                                int motion_x, int motion_y,
                                                int mb_block_height,
                                                int mb_block_width,
                                                int mb_block_count,
                                                int chroma_x_shift,
                                                int chroma_y_shift,
                                                int chroma_format)
{
/* Interlaced DCT is only possible with MPEG-2 and MPEG-4
 * and neither of these encoders currently supports 444. */
#define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
                           (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
    int16_t weight[12][64];
    int16_t orig[12][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int i;
    int skip_dct[12];
    int dct_offset = s->linesize * 8; // default for progressive frames
    int uv_dct_offset = s->uvlinesize * 8;
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    /* Per-MB adaptive quantization: adopt this MB's lambda and derive
     * qscale/dquant, honoring the syntax restrictions of the codec. */
    if (s->adaptive_quant) {
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;

        s->lambda = s->lambda_table[mb_xy];
        update_qscale(s);

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                // H.263-style syntax limits DQUANT to [-2, 2]
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->mb_intra) {
                        if (s->pict_type == AV_PICTURE_TYPE_B) {
                            // B-frames: odd dquant or direct-mode MBs cannot carry dquant
                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        // 8x8-MV MBs cannot carry dquant either
                        if (s->mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    /* Source pointers for the current MB in the frame to be encoded. */
    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y = s->new_picture->data[0] +
            (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_picture->data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
    ptr_cr = s->new_picture->data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;

    /* MBs that stick out over the right/bottom frame border: replicate
     * edge pixels into the emu buffer so the DCT reads valid data. */
    if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
        uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
        int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
        int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
        s->vdsp.emulated_edge_mc(ebuf, ptr_y,
                                 wrap_y, wrap_y,
                                 16, 16, mb_x * 16, mb_y * 16,
                                 s->width, s->height);
        ptr_y = ebuf;
        s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
                                 wrap_c, wrap_c,
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cb = ebuf + 16 * wrap_y;
        s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
                                 wrap_c, wrap_c,
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
                                 cw, ch);
        ptr_cr = ebuf + 16 * wrap_y + 16;
    }

    if (s->mb_intra) {
        if (INTERLACED_DCT(s)) {
            /* Decide frame vs. field DCT by comparing ildct metrics of the
             * progressive and interlaced block layouts (bias of 400). */
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
                                s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                     NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
                                                        NULL, wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
                                                        NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    // field DCT: rows of a block come from the same field
                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    // NOTE(review): this condition appears truncated in this
                    // copy (continuation line missing) — confirm upstream
                    if (chroma_format == CHROMA_422 ||
                        wrap_c <<= 1;
                }
            }
        }

        /* Intra: copy source pixels straight into the DCT blocks. */
        s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
        s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
        s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
        s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            // gray-only encoding: chroma blocks are never coded
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (chroma_format == CHROMA_422) {
                s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
            } else if (chroma_format == CHROMA_444) {
                s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
                s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
                s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
                s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
            }
        }
    } else {
        /* Inter: run motion compensation, then DCT the residual. */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix = s->hdsp.put_pixels_tab;
            op_qpix = s->qdsp.put_qpel_pixels_tab;
        } else {
            op_pix = s->hdsp.put_no_rnd_pixels_tab;
            op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
        }

        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f->data,
                          op_pix, op_qpix);
            // a second (backward) reference is averaged on top
            op_pix = s->hdsp.avg_pixels_tab;
            op_qpix = s->qdsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f->data,
                          op_pix, op_qpix);
        }

        if (INTERLACED_DCT(s)) {
            /* Same frame/field DCT decision as intra, but on the residual
             * (source vs. prediction) rather than the raw pixels. */
            int progressive_score, interlaced_score;

            s->interlaced_dct = 0;
            progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
                                s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                     ptr_y + wrap_y * 8,
                                                     wrap_y, 8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
                                                        wrap_y * 2, 8) +
                                   s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
                                                        ptr_y + wrap_y,
                                                        wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                    wrap_y <<= 1;
                    if (chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                            dest_y + dct_offset, wrap_y);
        s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                            dest_y + dct_offset + 8, wrap_y);

        if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!chroma_y_shift) { /* 422 */
                s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
                                    dest_cb + uv_dct_offset, wrap_c);
                s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
                                    dest_cr + uv_dct_offset, wrap_c);
            }
        }
        /* pre quantization: for low-variance MBs, skip blocks whose SAD
         * against the prediction is below a qscale-scaled threshold */
        if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
            // FIXME optimize
            if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                               wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!chroma_y_shift) { /* 422 */
                if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
                                   dest_cb + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
                                   dest_cr + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    /* Noise shaping: gather per-block visual weights and keep the
     * unquantized coefficients for the refinement pass below. */
    if (s->quantizer_noise_shaping) {
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr , wrap_c);
        if (!chroma_y_shift) { /* 422 */
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
                // FIXME we could decide to change to quantizer instead of
                // clipping
                // JS: I don't think that would be a good idea it could lower
                //     quality instead of improve it. Just INTRADC clipping
                //     deserves changes in quantizer
                if (overflow)
                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
            } else
                s->block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->qscale);
                }
            }
        }

        // single-coefficient elimination: drop blocks that are not worth coding
        if (s->luma_elim_threshold && !s->mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    /* Gray intra MBs: code only the chroma DC (mid-gray). */
    if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
        if (!chroma_y_shift) { /* 422 / 444 */
            for (i=6; i<12; i++) {
                s->block_last_index[i] = 0;
                s->block[i][0] = s->block[4][0];
            }
        }
    }

    // non c quantize code returns incorrect block_last_index FIXME
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* huffman encode */
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
        // NOTE(review): MPEG-1/MPEG-2 case labels appear missing in this
        // copy — confirm against upstream
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        if (CONFIG_MPEG4_ENCODER)
            ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        if (CONFIG_MSMPEG4ENC)
            ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        ff_mjpeg_encode_mb(s, s->block);
        break;
#endif
    case AV_CODEC_ID_SPEEDHQ:
        if (CONFIG_SPEEDHQ_ENCODER)
            ff_speedhq_encode_mb(s, s->block);
        break;
    default:
        av_assert1(0);
    }
}
2507 
2508 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2509 {
2510  if (s->chroma_format == CHROMA_420)
2511  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2512  else if (s->chroma_format == CHROMA_422)
2513  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2514  else
2515  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2516 }
2517 
                                       const MpegEncContext *s)
{
    int i;

    /* Snapshot the encoder state that encoding a single MB can change,
     * so encode_mb_hq() can restore it between candidate MB types. */
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;

    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    d->esc3_level_length= s->esc3_level_length;
}
2545 
                                      const MpegEncContext *s)
{
    int i;

    /* Adopt the state left behind by the winning candidate encode.
     * Unlike copy_context_before_encode(), this also carries over the
     * chosen MVs, MB mode, bitstream writers and coefficient blocks. */
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    d->esc3_level_length= s->esc3_level_length;
}
2584 
/**
 * Encode the current MB as the candidate mode already configured by the
 * caller (mv_dir/mv_type/mb_intra/mv[]), into one of two scratch
 * bitstream buffers, and keep the result if its score beats *dmin.
 * NOTE(review): the PutBitContext pb/pb2/tex_pb parameter line appears
 * to be missing from this copy of the signature — confirm upstream.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best,
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup);

    // double-buffered: *next_block selects the scratch block/bitstream set
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        // candidate 1 reconstructs into the RD scratchpad so the current
        // best candidate's reconstruction in s->dest is not overwritten
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    // base score: bits spent (all partitions)
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        mpv_reconstruct_mb(s, s->block);

        // full RD: rate * lambda2 + SSE distortion (fixed-point lambda)
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        // NOTE(review): the call saving the winning context into *best
        // appears missing in this copy — confirm upstream
    }
}
2635 
2636 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2637  const uint32_t *sq = ff_square_tab + 256;
2638  int acc=0;
2639  int x,y;
2640 
2641  if(w==16 && h==16)
2642  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2643  else if(w==8 && h==8)
2644  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2645 
2646  for(y=0; y<h; y++){
2647  for(x=0; x<w; x++){
2648  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2649  }
2650  }
2651 
2652  av_assert2(acc>=0);
2653 
2654  return acc;
2655 }
2656 
2657 static int sse_mb(MpegEncContext *s){
2658  int w= 16;
2659  int h= 16;
2660  int chroma_mb_w = w >> s->chroma_x_shift;
2661  int chroma_mb_h = h >> s->chroma_y_shift;
2662 
2663  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2664  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2665 
2666  if(w==16 && h==16)
2667  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2668  return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2669  s->dest[0], s->linesize, 16) +
2670  s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2671  s->dest[1], s->uvlinesize, chroma_mb_h) +
2672  s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2673  s->dest[2], s->uvlinesize, chroma_mb_h);
2674  }else{
2675  return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2676  s->dest[0], s->linesize, 16) +
2677  s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2678  s->dest[1], s->uvlinesize, chroma_mb_h) +
2679  s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2680  s->dest[2], s->uvlinesize, chroma_mb_h);
2681  }
2682  else
2683  return sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2684  s->dest[0], w, h, s->linesize) +
2685  sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2686  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2687  sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2688  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2689 }
2690 
    MpegEncContext *s= *(void**)arg;


    /* Motion-estimation pre-pass for this slice context, scanning the MBs
     * in reverse raster order (bottom-right to top-left). */
    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2709 
    MpegEncContext *s= *(void**)arg;

    /* Full motion-estimation pass for one slice context: estimate MVs and
     * MB types for every MB in [start_mb_y, end_mb_y). */
    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        // NOTE(review): the block-index init call the comment above refers
        // to appears missing in this copy — confirm upstream
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            // advance the luma block indices by one MB (2 blocks per row)
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
}
2734 
2735 static int mb_var_thread(AVCodecContext *c, void *arg){
2736  MpegEncContext *s= *(void**)arg;
2737  int mb_x, mb_y;
2738 
2739  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2740  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2741  int xx = mb_x * 16;
2742  int yy = mb_y * 16;
2743  const uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
2744  int varc;
2745  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2746 
2747  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2748  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2749 
2750  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2751  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2752  s->me.mb_var_sum_temp += varc;
2753  }
2754  }
2755  return 0;
2756 }
2757 
    /* Terminate the current slice: codec-specific stuffing, then flush the
     * slice PutBitContext to a byte boundary. */
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            // NOTE(review): the partition-merge call appears missing in
            // this copy — confirm upstream
        }

        ff_mpeg4_stuffing(&s->pb);
    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
               s->out_format == FMT_MJPEG) {
        // NOTE(review): the MJPEG stuffing call appears missing in this
        // copy — confirm upstream
    } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
    }

    flush_put_bits(&s->pb);

    // first-pass rate-control statistics; see get_bits_diff()
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2777 
{
    /* Back-fill the most recently reserved 12-byte mb_info slot with this
     * MB's bit offset, qscale, GOB number, MB address and first MV
     * predictor; hmv2/vmv2 are fixed to 0 (4MV not implemented). */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2797 
2798 static void update_mb_info(MpegEncContext *s, int startcode)
2799 {
2800  if (!s->mb_info)
2801  return;
2802  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2803  s->mb_info_size += 12;
2804  s->prev_mb_info = s->last_mb_info;
2805  }
2806  if (startcode) {
2807  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2808  /* This might have incremented mb_info_size above, and we return without
2809  * actually writing any info into that slot yet. But in that case,
2810  * this will be called again at the start of the after writing the
2811  * start code, actually writing the mb info. */
2812  return;
2813  }
2814 
2815  s->last_mb_info = put_bytes_count(&s->pb, 0);
2816  if (!s->mb_info_size)
2817  s->mb_info_size += 12;
2818  write_mb_info(s);
2819 }
2820 
2821 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2822 {
2823  if (put_bytes_left(&s->pb, 0) < threshold
2824  && s->slice_context_count == 1
2825  && s->pb.buf == s->avctx->internal->byte_buffer) {
2826  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2827 
2828  uint8_t *new_buffer = NULL;
2829  int new_buffer_size = 0;
2830 
2831  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2832  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2833  return AVERROR(ENOMEM);
2834  }
2835 
2836  emms_c();
2837 
2838  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2839  s->avctx->internal->byte_buffer_size + size_increase);
2840  if (!new_buffer)
2841  return AVERROR(ENOMEM);
2842 
2843  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2844  av_free(s->avctx->internal->byte_buffer);
2845  s->avctx->internal->byte_buffer = new_buffer;
2846  s->avctx->internal->byte_buffer_size = new_buffer_size;
2847  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2848  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2849  }
2850  if (put_bytes_left(&s->pb, 0) < threshold)
2851  return AVERROR(EINVAL);
2852  return 0;
2853 }
2854 
2855 static int encode_thread(AVCodecContext *c, void *arg){
2856  MpegEncContext *s= *(void**)arg;
2857  int mb_x, mb_y, mb_y_order;
2858  int chr_h= 16>>s->chroma_y_shift;
2859  int i, j;
2860  MpegEncContext best_s = { 0 }, backup_s;
2861  uint8_t bit_buf[2][MAX_MB_BYTES];
2862  uint8_t bit_buf2[2][MAX_MB_BYTES];
2863  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2864  PutBitContext pb[2], pb2[2], tex_pb[2];
2865 
2866  for(i=0; i<2; i++){
2867  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2868  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2869  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2870  }
2871 
2872  s->last_bits= put_bits_count(&s->pb);
2873  s->mv_bits=0;
2874  s->misc_bits=0;
2875  s->i_tex_bits=0;
2876  s->p_tex_bits=0;
2877  s->i_count=0;
2878  s->skip_count=0;
2879 
2880  for(i=0; i<3; i++){
2881  /* init last dc values */
2882  /* note: quant matrix value (8) is implied here */
2883  s->last_dc[i] = 128 << s->intra_dc_precision;
2884 
2885  s->encoding_error[i] = 0;
2886  }
2887  if(s->codec_id==AV_CODEC_ID_AMV){
2888  s->last_dc[0] = 128*8/13;
2889  s->last_dc[1] = 128*8/14;
2890  s->last_dc[2] = 128*8/14;
2891  }
2892  s->mb_skip_run = 0;
2893  memset(s->last_mv, 0, sizeof(s->last_mv));
2894 
2895  s->last_mv_dir = 0;
2896 
2897  switch(s->codec_id){
2898  case AV_CODEC_ID_H263:
2899  case AV_CODEC_ID_H263P:
2900  case AV_CODEC_ID_FLV1:
2901  if (CONFIG_H263_ENCODER)
2902  s->gob_index = H263_GOB_HEIGHT(s->height);
2903  break;
2904  case AV_CODEC_ID_MPEG4:
2905  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2907  break;
2908  }
2909 
2910  s->resync_mb_x=0;
2911  s->resync_mb_y=0;
2912  s->first_slice_line = 1;
2913  s->ptr_lastgob = s->pb.buf;
2914  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2915  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2916  int first_in_slice;
2917  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2918  if (first_in_slice && mb_y_order != s->start_mb_y)
2920  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2921  } else {
2922  mb_y = mb_y_order;
2923  }
2924  s->mb_x=0;
2925  s->mb_y= mb_y;
2926 
2927  ff_set_qscale(s, s->qscale);
2929 
2930  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2931  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2932  int mb_type= s->mb_type[xy];
2933 // int d;
2934  int dmin= INT_MAX;
2935  int dir;
2936  int size_increase = s->avctx->internal->byte_buffer_size/4
2937  + s->mb_width*MAX_MB_BYTES;
2938 
2940  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2941  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2942  return -1;
2943  }
2944  if(s->data_partitioning){
2945  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2946  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2947  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2948  return -1;
2949  }
2950  }
2951 
2952  s->mb_x = mb_x;
2953  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2954  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2955 
2956  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2958  xy= s->mb_y*s->mb_stride + s->mb_x;
2959  mb_type= s->mb_type[xy];
2960  }
2961 
2962  /* write gob / video packet header */
2963  if(s->rtp_mode){
2964  int current_packet_size, is_gob_start;
2965 
2966  current_packet_size = put_bytes_count(&s->pb, 1)
2967  - (s->ptr_lastgob - s->pb.buf);
2968 
2969  is_gob_start = s->rtp_payload_size &&
2970  current_packet_size >= s->rtp_payload_size &&
2971  mb_y + mb_x > 0;
2972 
2973  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2974 
2975  switch(s->codec_id){
2976  case AV_CODEC_ID_H263:
2977  case AV_CODEC_ID_H263P:
2978  if(!s->h263_slice_structured)
2979  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2980  break;
2982  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2984  if(s->mb_skip_run) is_gob_start=0;
2985  break;
2986  case AV_CODEC_ID_MJPEG:
2987  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2988  break;
2989  }
2990 
2991  if(is_gob_start){
2992  if(s->start_mb_y != mb_y || mb_x!=0){
2993  write_slice_end(s);
2994 
2995  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2997  }
2998  }
2999 
3000  av_assert2((put_bits_count(&s->pb)&7) == 0);
3001  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3002 
3003  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3004  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3005  int d = 100 / s->error_rate;
3006  if(r % d == 0){
3007  current_packet_size=0;
3008  s->pb.buf_ptr= s->ptr_lastgob;
3009  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3010  }
3011  }
3012 
3013  switch(s->codec_id){
3014  case AV_CODEC_ID_MPEG4:
3015  if (CONFIG_MPEG4_ENCODER) {
3018  }
3019  break;
3022  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3025  }
3026  break;
3027  case AV_CODEC_ID_H263:
3028  case AV_CODEC_ID_H263P:
3029  if (CONFIG_H263_ENCODER) {
3030  update_mb_info(s, 1);
3032  }
3033  break;
3034  }
3035 
3036  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3037  int bits= put_bits_count(&s->pb);
3038  s->misc_bits+= bits - s->last_bits;
3039  s->last_bits= bits;
3040  }
3041 
3042  s->ptr_lastgob += current_packet_size;
3043  s->first_slice_line=1;
3044  s->resync_mb_x=mb_x;
3045  s->resync_mb_y=mb_y;
3046  }
3047  }
3048 
3049  if( (s->resync_mb_x == s->mb_x)
3050  && s->resync_mb_y+1 == s->mb_y){
3051  s->first_slice_line=0;
3052  }
3053 
3054  s->mb_skipped=0;
3055  s->dquant=0; //only for QP_RD
3056 
3057  update_mb_info(s, 0);
3058 
3059  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3060  int next_block=0;
3061  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3062 
3063  copy_context_before_encode(&backup_s, s);
3064  backup_s.pb= s->pb;
3065  best_s.data_partitioning= s->data_partitioning;
3066  best_s.partitioned_frame= s->partitioned_frame;
3067  if(s->data_partitioning){
3068  backup_s.pb2= s->pb2;
3069  backup_s.tex_pb= s->tex_pb;
3070  }
3071 
3073  s->mv_dir = MV_DIR_FORWARD;
3074  s->mv_type = MV_TYPE_16X16;
3075  s->mb_intra= 0;
3076  s->mv[0][0][0] = s->p_mv_table[xy][0];
3077  s->mv[0][0][1] = s->p_mv_table[xy][1];
3078  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3079  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3080  }
3082  s->mv_dir = MV_DIR_FORWARD;
3083  s->mv_type = MV_TYPE_FIELD;
3084  s->mb_intra= 0;
3085  for(i=0; i<2; i++){
3086  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3087  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3088  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3089  }
3090  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3091  &dmin, &next_block, 0, 0);
3092  }
3094  s->mv_dir = MV_DIR_FORWARD;
3095  s->mv_type = MV_TYPE_16X16;
3096  s->mb_intra= 0;
3097  s->mv[0][0][0] = 0;
3098  s->mv[0][0][1] = 0;
3099  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3100  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3101  }
3103  s->mv_dir = MV_DIR_FORWARD;
3104  s->mv_type = MV_TYPE_8X8;
3105  s->mb_intra= 0;
3106  for(i=0; i<4; i++){
3107  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3108  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3109  }
3110  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3111  &dmin, &next_block, 0, 0);
3112  }
3114  s->mv_dir = MV_DIR_FORWARD;
3115  s->mv_type = MV_TYPE_16X16;
3116  s->mb_intra= 0;
3117  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3118  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3119  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3120  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3121  }
3123  s->mv_dir = MV_DIR_BACKWARD;
3124  s->mv_type = MV_TYPE_16X16;
3125  s->mb_intra= 0;
3126  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3127  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3128  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3129  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3130  }
3132  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3133  s->mv_type = MV_TYPE_16X16;
3134  s->mb_intra= 0;
3135  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3136  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3137  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3138  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3139  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3140  &dmin, &next_block, 0, 0);
3141  }
3143  s->mv_dir = MV_DIR_FORWARD;
3144  s->mv_type = MV_TYPE_FIELD;
3145  s->mb_intra= 0;
3146  for(i=0; i<2; i++){
3147  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3148  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3149  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3150  }
3151  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3152  &dmin, &next_block, 0, 0);
3153  }
3155  s->mv_dir = MV_DIR_BACKWARD;
3156  s->mv_type = MV_TYPE_FIELD;
3157  s->mb_intra= 0;
3158  for(i=0; i<2; i++){
3159  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3160  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3161  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3162  }
3163  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3164  &dmin, &next_block, 0, 0);
3165  }
3167  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3168  s->mv_type = MV_TYPE_FIELD;
3169  s->mb_intra= 0;
3170  for(dir=0; dir<2; dir++){
3171  for(i=0; i<2; i++){
3172  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3173  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3174  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3175  }
3176  }
3177  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3178  &dmin, &next_block, 0, 0);
3179  }
3181  s->mv_dir = 0;
3182  s->mv_type = MV_TYPE_16X16;
3183  s->mb_intra= 1;
3184  s->mv[0][0][0] = 0;
3185  s->mv[0][0][1] = 0;
3186  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3187  &dmin, &next_block, 0, 0);
3188  if(s->h263_pred || s->h263_aic){
3189  if(best_s.mb_intra)
3190  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3191  else
3192  ff_clean_intra_table_entries(s); //old mode?
3193  }
3194  }
3195 
3196  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3197  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3198  const int last_qp= backup_s.qscale;
3199  int qpi, qp, dc[6];
3200  int16_t ac[6][16];
3201  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3202  static const int dquant_tab[4]={-1,1,-2,2};
3203  int storecoefs = s->mb_intra && s->dc_val[0];
3204 
3205  av_assert2(backup_s.dquant == 0);
3206 
3207  //FIXME intra
3208  s->mv_dir= best_s.mv_dir;
3209  s->mv_type = MV_TYPE_16X16;
3210  s->mb_intra= best_s.mb_intra;
3211  s->mv[0][0][0] = best_s.mv[0][0][0];
3212  s->mv[0][0][1] = best_s.mv[0][0][1];
3213  s->mv[1][0][0] = best_s.mv[1][0][0];
3214  s->mv[1][0][1] = best_s.mv[1][0][1];
3215 
3216  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3217  for(; qpi<4; qpi++){
3218  int dquant= dquant_tab[qpi];
3219  qp= last_qp + dquant;
3220  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3221  continue;
3222  backup_s.dquant= dquant;
3223  if(storecoefs){
3224  for(i=0; i<6; i++){
3225  dc[i]= s->dc_val[0][ s->block_index[i] ];
3226  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3227  }
3228  }
3229 
3230  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3231  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3232  if(best_s.qscale != qp){
3233  if(storecoefs){
3234  for(i=0; i<6; i++){
3235  s->dc_val[0][ s->block_index[i] ]= dc[i];
3236  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3237  }
3238  }
3239  }
3240  }
3241  }
3242  }
3243  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3244  int mx= s->b_direct_mv_table[xy][0];
3245  int my= s->b_direct_mv_table[xy][1];
3246 
3247  backup_s.dquant = 0;
3248  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3249  s->mb_intra= 0;
3250  ff_mpeg4_set_direct_mv(s, mx, my);
3251  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3252  &dmin, &next_block, mx, my);
3253  }
3254  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3255  backup_s.dquant = 0;
3256  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3257  s->mb_intra= 0;
3258  ff_mpeg4_set_direct_mv(s, 0, 0);
3259  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3260  &dmin, &next_block, 0, 0);
3261  }
3262  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3263  int coded=0;
3264  for(i=0; i<6; i++)
3265  coded |= s->block_last_index[i];
3266  if(coded){
3267  int mx,my;
3268  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3269  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3270  mx=my=0; //FIXME find the one we actually used
3271  ff_mpeg4_set_direct_mv(s, mx, my);
3272  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3273  mx= s->mv[1][0][0];
3274  my= s->mv[1][0][1];
3275  }else{
3276  mx= s->mv[0][0][0];
3277  my= s->mv[0][0][1];
3278  }
3279 
3280  s->mv_dir= best_s.mv_dir;
3281  s->mv_type = best_s.mv_type;
3282  s->mb_intra= 0;
3283 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3284  s->mv[0][0][1] = best_s.mv[0][0][1];
3285  s->mv[1][0][0] = best_s.mv[1][0][0];
3286  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3287  backup_s.dquant= 0;
3288  s->skipdct=1;
3289  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3290  &dmin, &next_block, mx, my);
3291  s->skipdct=0;
3292  }
3293  }
3294 
3295  s->current_picture.qscale_table[xy] = best_s.qscale;
3296 
3297  copy_context_after_encode(s, &best_s);
3298 
3299  pb_bits_count= put_bits_count(&s->pb);
3300  flush_put_bits(&s->pb);
3301  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3302  s->pb= backup_s.pb;
3303 
3304  if(s->data_partitioning){
3305  pb2_bits_count= put_bits_count(&s->pb2);
3306  flush_put_bits(&s->pb2);
3307  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3308  s->pb2= backup_s.pb2;
3309 
3310  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3311  flush_put_bits(&s->tex_pb);
3312  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3313  s->tex_pb= backup_s.tex_pb;
3314  }
3315  s->last_bits= put_bits_count(&s->pb);
3316 
3317  if (CONFIG_H263_ENCODER &&
3318  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3320 
3321  if(next_block==0){ //FIXME 16 vs linesize16
3322  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3323  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3324  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3325  }
3326 
3327  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3328  mpv_reconstruct_mb(s, s->block);
3329  } else {
3330  int motion_x = 0, motion_y = 0;
3331  s->mv_type=MV_TYPE_16X16;
3332  // only one MB-Type possible
3333 
3334  switch(mb_type){
3336  s->mv_dir = 0;
3337  s->mb_intra= 1;
3338  motion_x= s->mv[0][0][0] = 0;
3339  motion_y= s->mv[0][0][1] = 0;
3340  break;
3342  s->mv_dir = MV_DIR_FORWARD;
3343  s->mb_intra= 0;
3344  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3345  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3346  break;
3348  s->mv_dir = MV_DIR_FORWARD;
3349  s->mv_type = MV_TYPE_FIELD;
3350  s->mb_intra= 0;
3351  for(i=0; i<2; i++){
3352  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3353  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3354  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3355  }
3356  break;
3358  s->mv_dir = MV_DIR_FORWARD;
3359  s->mv_type = MV_TYPE_8X8;
3360  s->mb_intra= 0;
3361  for(i=0; i<4; i++){
3362  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3363  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3364  }
3365  break;
3367  if (CONFIG_MPEG4_ENCODER) {
3369  s->mb_intra= 0;
3370  motion_x=s->b_direct_mv_table[xy][0];
3371  motion_y=s->b_direct_mv_table[xy][1];
3372  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3373  }
3374  break;
3376  if (CONFIG_MPEG4_ENCODER) {
3378  s->mb_intra= 0;
3379  ff_mpeg4_set_direct_mv(s, 0, 0);
3380  }
3381  break;
3383  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3384  s->mb_intra= 0;
3385  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3386  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3387  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3388  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3389  break;
3391  s->mv_dir = MV_DIR_BACKWARD;
3392  s->mb_intra= 0;
3393  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3394  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3395  break;
3397  s->mv_dir = MV_DIR_FORWARD;
3398  s->mb_intra= 0;
3399  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3400  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3401  break;
3403  s->mv_dir = MV_DIR_FORWARD;
3404  s->mv_type = MV_TYPE_FIELD;
3405  s->mb_intra= 0;
3406  for(i=0; i<2; i++){
3407  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3408  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3409  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3410  }
3411  break;
3413  s->mv_dir = MV_DIR_BACKWARD;
3414  s->mv_type = MV_TYPE_FIELD;
3415  s->mb_intra= 0;
3416  for(i=0; i<2; i++){
3417  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3418  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3419  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3420  }
3421  break;
3423  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3424  s->mv_type = MV_TYPE_FIELD;
3425  s->mb_intra= 0;
3426  for(dir=0; dir<2; dir++){
3427  for(i=0; i<2; i++){
3428  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3429  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3430  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3431  }
3432  }
3433  break;
3434  default:
3435  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3436  }
3437 
3438  encode_mb(s, motion_x, motion_y);
3439 
3440  // RAL: Update last macroblock type
3441  s->last_mv_dir = s->mv_dir;
3442 
3443  if (CONFIG_H263_ENCODER &&
3444  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3446 
3447  mpv_reconstruct_mb(s, s->block);
3448  }
3449 
3450  /* clean the MV table in IPS frames for direct mode in B-frames */
3451  if(s->mb_intra /* && I,P,S_TYPE */){
3452  s->p_mv_table[xy][0]=0;
3453  s->p_mv_table[xy][1]=0;
3454  }
3455 
3456  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3457  int w= 16;
3458  int h= 16;
3459 
3460  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3461  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3462 
3463  s->encoding_error[0] += sse(
3464  s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3465  s->dest[0], w, h, s->linesize);
3466  s->encoding_error[1] += sse(
3467  s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3468  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3469  s->encoding_error[2] += sse(
3470  s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3471  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3472  }
3473  if(s->loop_filter){
3474  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3476  }
3477  ff_dlog(s->avctx, "MB %d %d bits\n",
3478  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3479  }
3480  }
3481 
3482  //not beautiful here but we must write it before flushing so it has to be here
3483  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3485 
3486  write_slice_end(s);
3487 
3488  return 0;
3489 }
3490 
/* Merge one accumulated statistic from a worker (slice-thread) context into
 * the main context: add src->field into dst->field, then zero the source so
 * a later merge cannot double-count the same value.
 * Wrapped in do { } while (0) so the macro expands to a single statement and
 * stays correct inside unbraced if/else bodies. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
3493  MERGE(me.scene_change_score);
3494  MERGE(me.mc_mb_var_sum_temp);
3495  MERGE(me.mb_var_sum_temp);
3496 }
3497 
3499  int i;
3500 
3501  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3502  MERGE(dct_count[1]);
3503  MERGE(mv_bits);
3504  MERGE(i_tex_bits);
3505  MERGE(p_tex_bits);
3506  MERGE(i_count);
3507  MERGE(skip_count);
3508  MERGE(misc_bits);
3509  MERGE(encoding_error[0]);
3510  MERGE(encoding_error[1]);
3511  MERGE(encoding_error[2]);
3512 
3513  if (dst->noise_reduction){
3514  for(i=0; i<64; i++){
3515  MERGE(dct_error_sum[0][i]);
3516  MERGE(dct_error_sum[1][i]);
3517  }
3518  }
3519 
3520  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3521  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3522  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3523  flush_put_bits(&dst->pb);
3524 }
3525 
/**
 * Decide the quality (lambda/qscale) to use for the picture about to be
 * encoded.
 *
 * Priority: an explicitly queued s->next_lambda wins; otherwise, unless the
 * qscale is fixed, the rate controller (ff_rate_estimate_qscale()) picks the
 * quality.  With adaptive quantization enabled, per-codec post-processing of
 * the per-MB qscale table is applied and s->lambda is taken from
 * s->lambda_table[0].
 *
 * @param s       encoder context (quality fields are updated in place)
 * @param dry_run nonzero for a trial evaluation; s->next_lambda is only
 *                consumed (reset to 0) on a real run
 * @return 0 on success, -1 if rate control produced a negative quality
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* A caller pre-selected the quality for this picture; use it. */
        s->current_picture_ptr->f->quality =
            s->current_picture.f->quality = s->next_lambda;
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        /* Ask the rate controller for a qscale (dry_run avoids committing
         * rate-control state). */
        int quality = ff_rate_estimate_qscale(s, dry_run);
        s->current_picture_ptr->f->quality =
            s->current_picture.f->quality = quality;
        if (s->current_picture.f->quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        /* Per-codec cleanup of the adaptive per-MB qscale table. */
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            if (CONFIG_MPEG4_ENCODER)
            /* NOTE(review): the guarded call is missing in this view —
             * looks truncated; confirm against upstream sources. */
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
            /* NOTE(review): the guarded call is missing in this view —
             * looks truncated; confirm against upstream sources. */
            break;
        default:
        /* NOTE(review): the default-case body appears truncated here. */
        }

        s->lambda= s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->current_picture.f->quality;
    update_qscale(s);
    return 0;
}
3562 
3563 /* must be called before writing the header */
3565  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3566  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3567 
3568  if(s->pict_type==AV_PICTURE_TYPE_B){
3569  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3570  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3571  }else{
3572  s->pp_time= s->time - s->last_non_b_time;
3573  s->last_non_b_time= s->time;
3574  av_assert1(s->picture_number==0 || s->pp_time > 0);
3575  }
3576 }
3577 
3579 {
3580  int i, ret;
3581  int bits;
3582  int context_count = s->slice_context_count;
3583 
3584  /* Reset the average MB variance */
3585  s->me.mb_var_sum_temp =
3586  s->me.mc_mb_var_sum_temp = 0;
3587 
3588  /* we need to initialize some time vars before we can encode B-frames */
3589  // RAL: Condition added for MPEG1VIDEO
3590  if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
3592  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3594 
3595  s->me.scene_change_score=0;
3596 
3597 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3598 
3599  if(s->pict_type==AV_PICTURE_TYPE_I){
3600  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3601  else s->no_rounding=0;
3602  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3603  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3604  s->no_rounding ^= 1;
3605  }
3606 
3607  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3608  if (estimate_qp(s,1) < 0)
3609  return -1;
3611  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3612  if(s->pict_type==AV_PICTURE_TYPE_B)
3613  s->lambda= s->last_lambda_for[s->pict_type];
3614  else
3615  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3616  update_qscale(s);
3617  }
3618 
3619  if (s->out_format != FMT_MJPEG) {
3620  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3621  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3622  s->q_chroma_intra_matrix = s->q_intra_matrix;
3623  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3624  }
3625 
3626  s->mb_intra=0; //for the rate distortion & bit compare functions
3627  for(i=1; i<context_count; i++){
3628  ret = ff_update_duplicate_context(s->thread_context[i], s);
3629  if (ret < 0)
3630  return ret;
3631  }
3632 
3633  if(ff_init_me(s)<0)
3634  return -1;
3635 
3636  /* Estimate motion for every MB */
3637  if(s->pict_type != AV_PICTURE_TYPE_I){
3638  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3639  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3640  if (s->pict_type != AV_PICTURE_TYPE_B) {
3641  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3642  s->me_pre == 2) {
3643  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3644  }
3645  }
3646 
3647  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3648  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3649  /* I-Frame */
3650  for(i=0; i<s->mb_stride*s->mb_height; i++)
3651  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3652 
3653  if(!s->fixed_qscale){
3654  /* finding spatial complexity for I-frame rate control */
3655  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3656  }
3657  }
3658  for(i=1; i<context_count; i++){
3659  merge_context_after_me(s, s->thread_context[i]);
3660  }
3661  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3662  s->mb_var_sum = s->me. mb_var_sum_temp;
3663  emms_c();
3664 
3665  if (s->me.scene_change_score > s->scenechange_threshold &&
3666  s->pict_type == AV_PICTURE_TYPE_P) {
3667  s->pict_type= AV_PICTURE_TYPE_I;
3668  for(i=0; i<s->mb_stride*s->mb_height; i++)
3669  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3670  if(s->msmpeg4_version >= 3)
3671  s->no_rounding=1;
3672  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3673  s->mb_var_sum, s->mc_mb_var_sum);
3674  }
3675 
3676  if(!s->umvplus){
3677  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3678  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3679 
3680  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3681  int a,b;
3682  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3683  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3684  s->f_code= FFMAX3(s->f_code, a, b);
3685  }
3686 
3688  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3689  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3690  int j;
3691  for(i=0; i<2; i++){
3692  for(j=0; j<2; j++)
3693  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3694  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3695  }
3696  }
3697  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3698  int a, b;
3699 
3700  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3701  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3702  s->f_code = FFMAX(a, b);
3703 
3704  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3705  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3706  s->b_code = FFMAX(a, b);
3707 
3708  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3709  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3710  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3711  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3712  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3713  int dir, j;
3714  for(dir=0; dir<2; dir++){
3715  for(i=0; i<2; i++){
3716  for(j=0; j<2; j++){
3719  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3720  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3721  }
3722  }
3723  }
3724  }
3725  }
3726  }
3727 
3728  if (estimate_qp(s, 0) < 0)
3729  return -1;
3730 
3731  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3732  s->pict_type == AV_PICTURE_TYPE_I &&
3733  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3734  s->qscale= 3; //reduce clipping problems
3735 
3736  if (s->out_format == FMT_MJPEG) {
3737  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3738  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3739 
3740  if (s->avctx->intra_matrix) {
3741  chroma_matrix =
3742  luma_matrix = s->avctx->intra_matrix;
3743  }
3744  if (s->avctx->chroma_intra_matrix)
3745  chroma_matrix = s->avctx->chroma_intra_matrix;
3746 
3747  /* for mjpeg, we do include qscale in the matrix */
3748  for(i=1;i<64;i++){
3749  int j = s->idsp.idct_permutation[i];
3750 
3751  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3752  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3753  }
3754  s->y_dc_scale_table=
3755  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3756  s->chroma_intra_matrix[0] =
3757  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3758  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3759  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3760  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3761  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3762  s->qscale= 8;
3763 
3764  if (s->codec_id == AV_CODEC_ID_AMV) {
3765  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3766  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3767  for (int i = 1; i < 64; i++) {
3768  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3769 
3770  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3771  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3772  }
3773  s->y_dc_scale_table = y;
3774  s->c_dc_scale_table = c;
3775  s->intra_matrix[0] = 13;
3776  s->chroma_intra_matrix[0] = 14;
3777  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3778  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3779  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3780  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3781  s->qscale = 8;
3782  }
3783  }
3784 
3785  //FIXME var duplication
3786  if (s->pict_type == AV_PICTURE_TYPE_I) {
3787  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY; //FIXME pic_ptr
3788  s->current_picture.f->flags |= AV_FRAME_FLAG_KEY;
3789  } else {
3790  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY; //FIXME pic_ptr
3791  s->current_picture.f->flags &= ~AV_FRAME_FLAG_KEY;
3792  }
3793  s->current_picture_ptr->f->pict_type =
3794  s->current_picture.f->pict_type = s->pict_type;
3795 
3796  if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
3797  s->picture_in_gop_number=0;
3798 
3799  s->mb_x = s->mb_y = 0;
3800  s->last_bits= put_bits_count(&s->pb);
3801  switch(s->out_format) {
3802 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3803  case FMT_MJPEG:
3805  break;
3806 #endif
3807  case FMT_SPEEDHQ:
3808  if (CONFIG_SPEEDHQ_ENCODER)
3810  break;
3811  case FMT_H261:
3812  if (CONFIG_H261_ENCODER)
3814  break;
3815  case FMT_H263:
3816  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3818  else if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
3820  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3822  if (ret < 0)
3823  return ret;
3824  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3826  if (ret < 0)
3827  return ret;
3828  }
3829  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3831  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3833  else if (CONFIG_H263_ENCODER)
3835  break;
3836  case FMT_MPEG1:
3837  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3839  break;
3840  default:
3841  av_assert0(0);
3842  }
3843  bits= put_bits_count(&s->pb);
3844  s->header_bits= bits - s->last_bits;
3845 
3846  for(i=1; i<context_count; i++){
3847  update_duplicate_context_after_me(s->thread_context[i], s);
3848  }
3849  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3850  for(i=1; i<context_count; i++){
3851  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3852  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3853  merge_context_after_encode(s, s->thread_context[i]);
3854  }
3855  emms_c();
3856  return 0;
3857 }
3858 
3859 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3860  const int intra= s->mb_intra;
3861  int i;
3862 
3863  s->dct_count[intra]++;
3864 
3865  for(i=0; i<64; i++){
3866  int level= block[i];
3867 
3868  if(level){
3869  if(level>0){
3870  s->dct_error_sum[intra][i] += level;
3871  level -= s->dct_offset[intra][i];
3872  if(level<0) level=0;
3873  }else{
3874  s->dct_error_sum[intra][i] -= level;
3875  level += s->dct_offset[intra][i];
3876  if(level>0) level=0;
3877  }
3878  block[i]= level;
3879  }
3880  }
3881 }
3882 
3884  int16_t *block, int n,
3885  int qscale, int *overflow){
3886  const int *qmat;
3887  const uint16_t *matrix;
3888  const uint8_t *scantable;
3889  const uint8_t *perm_scantable;
3890  int max=0;
3891  unsigned int threshold1, threshold2;
3892  int bias=0;
3893  int run_tab[65];
3894  int level_tab[65];
3895  int score_tab[65];
3896  int survivor[65];
3897  int survivor_count;
3898  int last_run=0;
3899  int last_level=0;
3900  int last_score= 0;
3901  int last_i;
3902  int coeff[2][64];
3903  int coeff_count[64];
3904  int qmul, qadd, start_i, last_non_zero, i, dc;
3905  const int esc_length= s->ac_esc_length;
3906  uint8_t * length;
3907  uint8_t * last_length;
3908  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3909  int mpeg2_qscale;
3910 
3911  s->fdsp.fdct(block);
3912 
3913  if(s->dct_error_sum)
3914  s->denoise_dct(s, block);
3915  qmul= qscale*16;
3916  qadd= ((qscale-1)|1)*8;
3917 
3918  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3919  else mpeg2_qscale = qscale << 1;
3920 
3921  if (s->mb_intra) {
3922  int q;
3923  scantable= s->intra_scantable.scantable;
3924  perm_scantable= s->intra_scantable.permutated;
3925  if (!s->h263_aic) {
3926  if (n < 4)
3927  q = s->y_dc_scale;
3928  else
3929  q = s->c_dc_scale;
3930  q = q << 3;
3931  } else{
3932  /* For AIC we skip quant/dequant of INTRADC */
3933  q = 1 << 3;
3934  qadd=0;
3935  }
3936 
3937  /* note: block[0] is assumed to be positive */
3938  block[0] = (block[0] + (q >> 1)) / q;
3939  start_i = 1;
3940  last_non_zero = 0;
3941  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3942  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3943  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3944  bias= 1<<(QMAT_SHIFT-1);
3945 
3946  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3947  length = s->intra_chroma_ac_vlc_length;
3948  last_length= s->intra_chroma_ac_vlc_last_length;
3949  } else {
3950  length = s->intra_ac_vlc_length;
3951  last_length= s->intra_ac_vlc_last_length;
3952  }
3953  } else {
3954  scantable= s->inter_scantable.scantable;
3955  perm_scantable= s->inter_scantable.permutated;
3956  start_i = 0;
3957  last_non_zero = -1;
3958  qmat = s->q_inter_matrix[qscale];
3959  matrix = s->inter_matrix;
3960  length = s->inter_ac_vlc_length;
3961  last_length= s->inter_ac_vlc_last_length;
3962  }
3963  last_i= start_i;
3964 
3965  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3966  threshold2= (threshold1<<1);
3967 
3968  for(i=63; i>=start_i; i--) {
3969  const int j = scantable[i];
3970  int level = block[j] * qmat[j];
3971 
3972  if(((unsigned)(level+threshold1))>threshold2){
3973  last_non_zero = i;
3974  break;
3975  }
3976  }
3977 
3978  for(i=start_i; i<=last_non_zero; i++) {
3979  const int j = scantable[i];
3980  int level = block[j] * qmat[j];
3981 
3982 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3983 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3984  if(((unsigned)(level+threshold1))>threshold2){
3985  if(level>0){
3986  level= (bias + level)>>QMAT_SHIFT;
3987  coeff[0][i]= level;
3988  coeff[1][i]= level-1;
3989 // coeff[2][k]= level-2;
3990  }else{
3991  level= (bias - level)>>QMAT_SHIFT;
3992  coeff[0][i]= -level;
3993  coeff[1][i]= -level+1;
3994 // coeff[2][k]= -level+2;
3995  }
3996  coeff_count[i]= FFMIN(level, 2);
3997  av_assert2(coeff_count[i]);
3998  max |=level;
3999  }else{
4000  coeff[0][i]= (level>>31)|1;
4001  coeff_count[i]= 1;
4002  }
4003  }
4004 
4005  *overflow= s->max_qcoeff < max; //overflow might have happened
4006 
4007  if(last_non_zero < start_i){
4008  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4009  return last_non_zero;
4010  }
4011 
4012  score_tab[start_i]= 0;
4013  survivor[0]= start_i;
4014  survivor_count= 1;
4015 
4016  for(i=start_i; i<=last_non_zero; i++){
4017  int level_index, j, zero_distortion;
4018  int dct_coeff= FFABS(block[ scantable[i] ]);
4019  int best_score=256*256*256*120;
4020 
4021  if (s->fdsp.fdct == ff_fdct_ifast)
4022  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4023  zero_distortion= dct_coeff*dct_coeff;
4024 
4025  for(level_index=0; level_index < coeff_count[i]; level_index++){
4026  int distortion;
4027  int level= coeff[level_index][i];
4028  const int alevel= FFABS(level);
4029  int unquant_coeff;
4030 
4031  av_assert2(level);
4032 
4033  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4034  unquant_coeff= alevel*qmul + qadd;
4035  } else if(s->out_format == FMT_MJPEG) {
4036  j = s->idsp.idct_permutation[scantable[i]];
4037  unquant_coeff = alevel * matrix[j] * 8;
4038  }else{ // MPEG-1
4039  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4040  if(s->mb_intra){
4041  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4042  unquant_coeff = (unquant_coeff - 1) | 1;
4043  }else{
4044  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4045  unquant_coeff = (unquant_coeff - 1) | 1;
4046  }
4047  unquant_coeff<<= 3;
4048  }
4049 
4050  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4051  level+=64;
4052  if((level&(~127)) == 0){
4053  for(j=survivor_count-1; j>=0; j--){
4054  int run= i - survivor[j];
4055  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4056  score += score_tab[i-run];
4057 
4058  if(score < best_score){
4059  best_score= score;
4060  run_tab[i+1]= run;
4061  level_tab[i+1]= level-64;
4062  }
4063  }
4064 
4065  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4066  for(j=survivor_count-1; j>=0; j--){
4067  int run= i - survivor[j];
4068  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4069  score += score_tab[i-run];
4070  if(score < last_score){
4071  last_score= score;
4072  last_run= run;
4073  last_level= level-64;
4074  last_i= i+1;
4075  }
4076  }
4077  }
4078  }else{
4079  distortion += esc_length*lambda;
4080  for(j=survivor_count-1; j>=0; j--){
4081  int run= i - survivor[j];
4082  int score= distortion + score_tab[i-run];
4083 
4084  if(score < best_score){
4085  best_score= score;
4086  run_tab[i+1]= run;
4087  level_tab[i+1]= level-64;
4088  }
4089  }
4090 
4091  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4092  for(j=survivor_count-1; j>=0; j--){
4093  int run= i - survivor[j];
4094  int score= distortion + score_tab[i-run];
4095  if(score < last_score){
4096  last_score= score;
4097  last_run= run;
4098  last_level= level-64;
4099  last_i= i+1;
4100  }
4101  }
4102  }
4103  }
4104  }
4105 
4106  score_tab[i+1]= best_score;
4107 
4108  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4109  if(last_non_zero <= 27){
4110  for(; survivor_count; survivor_count--){
4111  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4112  break;
4113  }
4114  }else{
4115  for(; survivor_count; survivor_count--){
4116  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4117  break;
4118  }
4119  }
4120 
4121  survivor[ survivor_count++ ]= i+1;
4122  }
4123 
4124  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4125  last_score= 256*256*256*120;
4126  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4127  int score= score_tab[i];
4128  if (i)
4129  score += lambda * 2; // FIXME more exact?
4130 
4131  if(score < last_score){
4132  last_score= score;
4133  last_i= i;
4134  last_level= level_tab[i];
4135  last_run= run_tab[i];
4136  }
4137  }
4138  }
4139 
4140  s->coded_score[n] = last_score;
4141 
4142  dc= FFABS(block[0]);
4143  last_non_zero= last_i - 1;
4144  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4145 
4146  if(last_non_zero < start_i)
4147  return last_non_zero;
4148 
4149  if(last_non_zero == 0 && start_i == 0){
4150  int best_level= 0;
4151  int best_score= dc * dc;
4152 
4153  for(i=0; i<coeff_count[0]; i++){
4154  int level= coeff[i][0];
4155  int alevel= FFABS(level);
4156  int unquant_coeff, score, distortion;
4157 
4158  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4159  unquant_coeff= (alevel*qmul + qadd)>>3;
4160  } else{ // MPEG-1
4161  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4162  unquant_coeff = (unquant_coeff - 1) | 1;
4163  }
4164  unquant_coeff = (unquant_coeff + 4) >> 3;
4165  unquant_coeff<<= 3 + 3;
4166 
4167  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4168  level+=64;
4169  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4170  else score= distortion + esc_length*lambda;
4171 
4172  if(score < best_score){
4173  best_score= score;
4174  best_level= level - 64;
4175  }
4176  }
4177  block[0]= best_level;
4178  s->coded_score[n] = best_score - dc*dc;
4179  if(best_level == 0) return -1;
4180  else return last_non_zero;
4181  }
4182 
4183  i= last_i;
4184  av_assert2(last_level);
4185 
4186  block[ perm_scantable[last_non_zero] ]= last_level;
4187  i -= last_run + 1;
4188 
4189  for(; i>start_i; i -= run_tab[i] + 1){
4190  block[ perm_scantable[i-1] ]= level_tab[i];
4191  }
4192 
4193  return last_non_zero;
4194 }
4195 
/* 8x8 DCT basis functions in fixed point (scaled by 1<<BASIS_SHIFT),
 * indexed as [permuted coefficient index][spatial position 8*x+y].
 * Filled lazily by build_basis() on first use in dct_quantize_refine(). */
static int16_t basis[64][64];
4197 
4198 static void build_basis(uint8_t *perm){
4199  int i, j, x, y;
4200  emms_c();
4201  for(i=0; i<8; i++){
4202  for(j=0; j<8; j++){
4203  for(y=0; y<8; y++){
4204  for(x=0; x<8; x++){
4205  double s= 0.25*(1<<BASIS_SHIFT);
4206  int index= 8*i + j;
4207  int perm_index= perm[index];
4208  if(i==0) s*= sqrt(0.5);
4209  if(j==0) s*= sqrt(0.5);
4210  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4211  }
4212  }
4213  }
4214  }
4215 }
4216 
/**
 * Noise-shaping refinement of an already-quantized 8x8 block
 * (used when quantizer_noise_shaping is enabled).
 *
 * Iteratively perturbs single quantized coefficients by +/-1 and keeps a
 * change whenever it lowers a combined score of weighted reconstruction
 * error (via try_8x8basis) plus lambda-scaled VLC bit-cost delta; stops
 * when no single +/-1 change improves the score.
 *
 * @param s      encoder context (quantizer tables, VLC length tables, DSP fns)
 * @param block  quantized coefficients, permuted order; modified in place
 * @param weight per-coefficient perceptual weights; remapped in place to 16..63
 * @param orig   original (pre-quantization) spatial-domain reference samples
 * @param n      block index (n < 4 => luma, else chroma)
 * @param qscale quantizer scale for this block
 * @return new index of the last non-zero coefficient
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                     // current weighted reconstruction residual
    LOCAL_ALIGNED_16(int16_t, d1, [64]); // DCT of weighted residual (gradient analysis)
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                     // zero-run lengths preceding each nonzero coeff
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;                    // VLC bit lengths for non-last coefficients
    uint8_t * last_length;               // VLC bit lengths for the last coefficient
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* Lazily build the DCT basis table on first call. */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    /* H.263-style dequant: dequant(level) = qmul*level +/- qadd */
    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] = (rounded DC reconstruction) - original, in RECON_SHIFT fixed
     * point; nonzero AC contributions are added below via add_8x8basis. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Remap perceptual weights into 16..63 and accumulate sum of squares
     * to derive the rate/distortion trade-off factor lambda. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Build the run-length table and fold each dequantized nonzero
     * coefficient's basis contribution into rem[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: per pass, find the single +/-1 coefficient change
     * with the best score; apply it and repeat until no change helps. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual: its sign per coefficient is used
             * below to skip changes that would push against the gradient. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* Intra DC coefficient: distortion-only trial (no VLC cost term). */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* Below noise-shaping level 3, only coefficients up to just past
             * the last nonzero one are considered. */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* Below level 2, only allow shrinking coefficient magnitudes. */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Nonzero -> nonzero: bit-cost delta of re-coding the
                         * same (run, level) pair with the new level. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Zero -> +/-1: inserting a coefficient splits the
                         * zero run covering position i. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* Skip if the gradient points the same way as the
                             * proposed change (would not reduce the residual). */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* New coefficient becomes the new "last"; the old
                             * last coefficient is re-coded as non-last. */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +/-1 -> zero: removing a coefficient merges the
                     * surrounding zero runs. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        /* Removing the last coefficient: previous nonzero
                         * coefficient becomes the new "last". */
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* Add the weighted-distortion delta of applying the change. */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change, then refresh last_non_zero, the
             * run-length table and the residual. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;  // no single +/-1 change improves the score; done
        }
    }

    return last_non_zero;
}
4532 
4533 /**
4534  * Permute an 8x8 block according to permutation.
4535  * @param block the block which will be permuted according to
4536  * the given permutation vector
4537  * @param permutation the permutation vector
4538  * @param last the last non zero coefficient in scantable order, used to
4539  * speed the permutation up
4540  * @param scantable the used scantable, this is only used to speed the
4541  * permutation up, the block is not (inverse) permutated
4542  * to scantable order!
4543  */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t stash[64];
    int idx;

    /* Nothing (or only the DC coefficient) to move. */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Pull the coefficients covered by the scan out of the block and
     * clear their original slots, so already-moved and not-yet-moved
     * values cannot collide below. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* Drop each stashed coefficient into its permuted position. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = stash[pos];
    }
}
4568 
/**
 * Default (C) forward DCT + quantization of one 8x8 block.
 *
 * NOTE(review): the opening signature line was missing from this extraction
 * and has been reconstructed from the surrounding code — confirm against the
 * original file.
 *
 * @param s        encoder context (DCT, scantables, quant matrices)
 * @param block    spatial-domain samples in, quantized coefficients out
 * @param n        block index (n < 4 => luma DC/quant tables, else chroma)
 * @param qscale   quantizer scale selecting the precomputed quant matrix
 * @param overflow set to nonzero if a quantized level may exceed s->max_qcoeff
 * @return index of the last non-zero coefficient in scan order
 */
static int dct_quantize_c(MpegEncContext *s,
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    /* A coefficient quantizes to nonzero iff |level|+bias reaches 1<<QMAT_SHIFT;
     * the unsigned trick below tests that for both signs in one compare. */
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* Scan backwards to find the last coefficient that survives quantization,
     * zeroing everything after it. */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* Quantize the remaining coefficients in scan order. */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                      scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:88
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1302
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:341
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:82
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:149
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:234
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:427
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:286
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:100
level
uint8_t level
Definition: svq3.c:204
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:386
av_clip
#define av_clip
Definition: common.h:98
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3564
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:139
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:540
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:196
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
r
const char * r
Definition: vf_curves.c:126
acc
int acc
Definition: yuv2rgb.c:554
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:372
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:225
mem_internal.h
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:216
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1299
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1655
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:137
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:96
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:607
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2508
matrix
Definition: vc1dsp.c:42
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:55
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2657
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4544
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4196
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:974
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:169
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:163
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2710
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:834
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1687
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:259
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:387
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:327
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:222
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
AVFrame::width
int width
Definition: frame.h:412
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:107
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:522
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:374
AVOption
AVOption.
Definition: opt.h:346
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:109
data
const char data[16]
Definition: mxf.c:148
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:202
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:214
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:114
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:815
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:386
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:540
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:39
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:481
sqr
static double sqr(double in)
Definition: af_afwtdn.c:871
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
Picture
Picture.
Definition: mpegpicture.h:46
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:100
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2691
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2093
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:140
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:962
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:98
wmv2enc.h
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1263
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
encode_picture
static int encode_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:3578
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:601
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:577
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MPEGVIDEO_MAX_PLANES
#define MPEGVIDEO_MAX_PLANES
Definition: mpegpicture.h:32
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:271
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:258
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:108
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:108
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:41
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:904
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1754
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:65
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3526
FDCTDSPContext
Definition: fdctdsp.h:28
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:253
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:201
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:197
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:831
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3492
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:190
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:453
skip_check
static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
Definition: mpegvideo_enc.c:1261
fail
#define fail()
Definition: checkasm.h:179
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:139
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:105
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1008
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
perm
perm
Definition: f_perms.c:75
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1231
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:87
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:643
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:42
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:53
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:218
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:265
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:99
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:439
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:290
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2821
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:76
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:876
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:339
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1708
RateControlContext
rate control context.
Definition: ratecontrol.h:63
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:235
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2798
av_cold
#define av_cold
Definition: attributes.h:90
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:591
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:92
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1040
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4198
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:40
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1057
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:450
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:256
g
const char * g
Definition: vf_curves.c:127
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1494
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1574
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:219
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1527
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:269
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:855
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2735
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1277
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:380
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:390
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:484
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:53
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:637
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2778
run
uint8_t run
Definition: svq3.c:203
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:283
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:325
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:224
bias
static int bias(int x, int c)
Definition: vqcdec.c:114
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:34
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
Definition: mpegpicture.c:304
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:480
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:56
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:248
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:878
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:275
mathops.h
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3491
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:894
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:690
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:987
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:963
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:128
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1327
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1324
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:120
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1059
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1562
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:272
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:262
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:841
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:275
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3859
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1337
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:544
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:794
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1402
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1334
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2117
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3498
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:285
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:80
Picture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:80
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:523
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: vvc_intra.c:291
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1031
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:194
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1120
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2003
shift
static int shift(int a, int b)
Definition: bonk.c:262
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:295
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:594
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1065
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:197
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:326
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:281
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:94
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:86
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:41
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:112
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:279
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:521
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:260
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:494
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:528
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:284
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:815
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:87
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:67
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:53
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:191
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:138
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:451
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:219
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:299
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:246
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:455
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:94
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:449
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:515
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3883
MpegEncContext::encoding_error
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
Definition: mpegvideo.h:251
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1071
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2855
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1323
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:997
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:389
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:274
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:283
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:105
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:816
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:905
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:203
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2518
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:308
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:246
AVCodecContext::height
int height
Definition: avcodec.h:618
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:507
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:681
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:102
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_enc.c:1094
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:290
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:110
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2585
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:105
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
Picture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:81
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1704
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:200
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:452
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:862
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:412
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:91
Picture::shared
int shared
Definition: mpegpicture.h:78
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:890
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1363
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:342
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:97
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1256
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1468
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:848
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1651
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:263
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:862
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:104
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:83
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:103
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:965
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:108
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:274
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:795
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:537
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:107
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:531
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1306
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
ff_set_cmp
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4217
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2546
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:257
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1047
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:964
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2059
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:983
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:53
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:425
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2636
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:343
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:865
h
h
Definition: vp9dsp_template.c:2038
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:856
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:143
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:40
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4569
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:425
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:169
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:340
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
pixblockdsp.h
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:104
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1602
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:975
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:448
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2758
intmath.h