FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem_internal.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/thread.h"
45 #include "avcodec.h"
46 #include "encode.h"
47 #include "idctdsp.h"
48 #include "mpeg12codecs.h"
49 #include "mpeg12data.h"
50 #include "mpeg12enc.h"
51 #include "mpegvideo.h"
52 #include "mpegvideodata.h"
53 #include "mpegvideoenc.h"
54 #include "h261enc.h"
55 #include "h263.h"
56 #include "h263data.h"
57 #include "h263enc.h"
58 #include "mjpegenc_common.h"
59 #include "mathops.h"
60 #include "mpegutils.h"
61 #include "mjpegenc.h"
62 #include "speedhqenc.h"
63 #include "msmpeg4enc.h"
64 #include "pixblockdsp.h"
65 #include "qpeldsp.h"
66 #include "faandct.h"
67 #include "aandcttab.h"
68 #include "flvenc.h"
69 #include "mpeg4video.h"
70 #include "mpeg4videodata.h"
71 #include "mpeg4videoenc.h"
72 #include "internal.h"
73 #include "bytestream.h"
74 #include "wmv2enc.h"
75 #include "rv10enc.h"
76 #include "packet_internal.h"
77 #include <limits.h>
78 #include "sp5x.h"
79 
/* Quantizer bias is stored in fixed point with this many fractional bits. */
80 #define QUANT_BIAS_SHIFT 8
81 
/* Fixed-point shifts used when precomputing the quantization matrices
 * (the 16-bit variant feeds the SIMD/MMX quantizer path). */
82 #define QMAT_SHIFT_MMX 16
83 #define QMAT_SHIFT 21
84 
/* Forward declarations of encoder-local helpers defined later in this file. */
85 static int encode_picture(MpegEncContext *s);
86 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
87 static int sse_mb(MpegEncContext *s);
88 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
89 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
90 
/* Shared default tables, filled once by mpv_encode_init_static(). */
91 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
92 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
93 
/* NOTE(review): this is a doxygen extraction — listing lines 95-96 (the
 * actual option entries, presumably FF_MPV_COMMON_OPTS) and line 100 (the
 * AVClass variable declaration opening the initializer below) are missing.
 * Verify against upstream mpegvideo_enc.c before relying on this listing. */
94 static const AVOption mpv_generic_options[] = {
97  { NULL },
98 };
99 
101  .class_name = "generic mpegvideo encoder",
102  .item_name = av_default_item_name,
103  .option = mpv_generic_options,
104  .version = LIBAVUTIL_VERSION_INT,
105 };
106 
/*
 * Precompute fixed-point quantization tables (qmat / qmat16) for every
 * qscale in [qmin, qmax] from the given quantization matrix, choosing the
 * scaling per the active forward-DCT implementation.
 * NOTE(review): doxygen extraction — listing line 127 (the closing condition
 * of the first fdct comparison, presumably ff_jpeg_fdct_islow_10, and the
 * opening brace) is missing; verify against upstream.
 */
107 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
108  uint16_t (*qmat16)[2][64],
109  const uint16_t *quant_matrix,
110  int bias, int qmin, int qmax, int intra)
111 {
112  FDCTDSPContext *fdsp = &s->fdsp;
113  int qscale;
114  int shift = 0;
115 
116  for (qscale = qmin; qscale <= qmax; qscale++) {
117  int i;
118  int qscale2;
119 
 /* qscale2 is the effective (doubled or non-linear) quantizer step. */
120  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
121  else qscale2 = qscale << 1;
122 
 /* Branch 1: "slow"/faan DCTs — matrix entries scaled by 2^QMAT_SHIFT. */
123  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
124 #if CONFIG_FAANDCT
125  fdsp->fdct == ff_faandct ||
126 #endif /* CONFIG_FAANDCT */
128  for (i = 0; i < 64; i++) {
129  const int j = s->idsp.idct_permutation[i];
130  int64_t den = (int64_t) qscale2 * quant_matrix[j];
131  /* 16 <= qscale * quant_matrix[i] <= 7905
132  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
133  * 19952 <= x <= 249205026
134  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
135  * 3444240 >= (1 << 36) / (x) >= 275 */
136 
137  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
138  }
 /* Branch 2: AAN "ifast" DCT — the AAN post-scales are folded in. */
139  } else if (fdsp->fdct == ff_fdct_ifast) {
140  for (i = 0; i < 64; i++) {
141  const int j = s->idsp.idct_permutation[i];
142  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
143  /* 16 <= qscale * quant_matrix[i] <= 7905
144  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
145  * 19952 <= x <= 249205026
146  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
147  * 3444240 >= (1 << 36) / (x) >= 275 */
148 
149  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
150  }
 /* Branch 3: everything else — also fill the 16-bit SIMD tables. */
151  } else {
152  for (i = 0; i < 64; i++) {
153  const int j = s->idsp.idct_permutation[i];
154  int64_t den = (int64_t) qscale2 * quant_matrix[j];
155  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
156  * Assume x = qscale * quant_matrix[i]
157  * So 16 <= x <= 7905
158  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
159  * so 32768 >= (1 << 19) / (x) >= 67 */
160  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
161  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
162  // (qscale * quant_matrix[i]);
163  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
164 
 /* Clamp to keep the 16-bit reciprocal non-zero and in range. */
165  if (qmat16[qscale][0][i] == 0 ||
166  qmat16[qscale][0][i] == 128 * 256)
167  qmat16[qscale][0][i] = 128 * 256 - 1;
168  qmat16[qscale][1][i] =
169  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
170  qmat16[qscale][0][i]);
171  }
172  }
173 
 /* Track how far results can overflow 32 bits so we can warn below. */
174  for (i = intra; i < 64; i++) {
175  int64_t max = 8191;
176  if (fdsp->fdct == ff_fdct_ifast) {
177  max = (8191LL * ff_aanscales[i]) >> 14;
178  }
179  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
180  shift++;
181  }
182  }
183  }
184  if (shift) {
185  av_log(s->avctx, AV_LOG_INFO,
186  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
187  QMAT_SHIFT - shift);
188  }
189 }
190 
/*
 * Derive the quantizer scale (s->qscale) from the current Lagrange
 * multiplier (s->lambda) and refresh the derived s->lambda2.
 * NOTE(review): doxygen extraction — listing line 216 (the shift count that
 * ends the final lambda2 expression) is missing; verify upstream.
 */
191 static inline void update_qscale(MpegEncContext *s)
192 {
 /* NOTE: the "&& 0" makes this non-linear-qscale search dead code;
  * the else branch below is always taken as written. */
193  if (s->q_scale_type == 1 && 0) {
194  int i;
195  int bestdiff=INT_MAX;
196  int best = 1;
197 
 /* Pick the non-linear qscale entry closest to lambda*139, honoring
  * qmin always and qmax unless VBV forced us to ignore it. */
198  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
199  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
200  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
201  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
202  continue;
203  if (diff < bestdiff) {
204  bestdiff = diff;
205  best = i;
206  }
207  }
208  s->qscale = best;
209  } else {
 /* Linear mapping with rounding: qscale ≈ lambda*139 / 2^(FF_LAMBDA_SHIFT+7). */
210  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
211  (FF_LAMBDA_SHIFT + 7);
212  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
213  }
214 
215  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
217 }
218 
/*
 * Write an optional custom quantization matrix to the bitstream: a '1'
 * marker bit followed by the 64 coefficients, or a single '0' bit when no
 * custom matrix is present.
 * NOTE(review): doxygen extraction — listing line 219 (the function
 * signature, presumably ff_write_quant_matrix(PutBitContext *pb,
 * uint16_t *matrix)) and line 226 (the per-coefficient put_bits call)
 * are missing; verify upstream.
 */
220 {
221  int i;
222 
223  if (matrix) {
224  put_bits(pb, 1, 1);
225  for (i = 0; i < 64; i++) {
227  }
228  } else
229  put_bits(pb, 1, 0);
230 }
231 
232 /**
233  * init s->current_picture.qscale_table from s->lambda_table
234  */
/* NOTE(review): doxygen extraction — listing line 235 (the signature,
 * presumably void ff_init_qscale_tab(MpegEncContext *s)) is missing. */
236 {
237  int8_t * const qscale_table = s->current_picture.qscale_table;
238  int i;
239 
 /* Same lambda->qscale mapping as update_qscale(), applied per macroblock
  * and clipped to the user qmin/qmax range. */
240  for (i = 0; i < s->mb_num; i++) {
241  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
242  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
243  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
244  s->avctx->qmax);
245  }
246 }
247 
/*
 * Copy the per-picture encoding parameters from src to dst after motion
 * estimation, so slice-thread duplicate contexts agree with the main one.
 * NOTE(review): doxygen extraction — listing line 248 (the first signature
 * line naming the function and the dst parameter) and line 253 (one COPY()
 * entry) are missing; verify upstream.
 */
249  const MpegEncContext *src)
250 {
251 #define COPY(a) dst->a= src->a
252  COPY(pict_type);
254  COPY(f_code);
255  COPY(b_code);
256  COPY(qscale);
257  COPY(lambda);
258  COPY(lambda2);
259  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
260  COPY(progressive_frame); // FIXME don't set in encode_header
261  COPY(partitioned_frame); // FIXME don't set in encode_header
262 #undef COPY
263 }
264 
265 static void mpv_encode_init_static(void)
266 {
267  for (int i = -16; i < 16; i++)
268  default_fcode_tab[i + MAX_MV] = 1;
269 }
270 
271 /**
272  * Set the given MpegEncContext to defaults for encoding.
273  * the changed fields will not depend upon the prior state of the MpegEncContext.
274  */
/* NOTE(review): doxygen extraction — listing line 275 (the signature,
 * presumably static void mpv_encode_defaults(MpegEncContext *s)) and
 * line 279 (probably the common-defaults call) are missing; verify
 * upstream. */
276 {
277  static AVOnce init_static_once = AV_ONCE_INIT;
278 
280 
 /* Fill the shared static tables exactly once, even with slice threads. */
281  ff_thread_once(&init_static_once, mpv_encode_init_static);
282 
283  s->me.mv_penalty = default_mv_penalty;
284  s->fcode_tab = default_fcode_tab;
285 
286  s->input_picture_number = 0;
287  s->picture_in_gop_number = 0;
288 }
289 
/*
 * Select the DCT quantization/denoise function pointers for this context:
 * C fallbacks unless already set (e.g. by an arch-specific init), with the
 * trellis quantizer substituted when the user enabled trellis.
 * NOTE(review): doxygen extraction — listing line 290 (the signature) and
 * line 293 (the x86-specific init call inside the #if) are missing;
 * verify upstream.
 */
291 {
292 #if ARCH_X86
294 #endif
295 
296  if (CONFIG_H263_ENCODER)
297  ff_h263dsp_init(&s->h263dsp);
298  if (!s->dct_quantize)
299  s->dct_quantize = ff_dct_quantize_c;
300  if (!s->denoise_dct)
301  s->denoise_dct = denoise_dct_c;
 /* Keep the non-trellis quantizer reachable even when trellis is on. */
302  s->fast_dct_quantize = s->dct_quantize;
303  if (s->avctx->trellis)
304  s->dct_quantize = dct_quantize_trellis_c;
305 
306  return 0;
307 }
308 
309 /* init video encoder */
/*
 * Main encoder initialization: validates user options against the selected
 * codec, configures per-codec output format and delay, allocates the MV/MB
 * tables and quantization matrices, and initializes rate control.
 * NOTE(review): this is a doxygen extraction with many listing lines
 * missing throughout (numbers skip, e.g. 310, 312, 317, 335, 341-342,
 * 349, 352, 354, 376, 400-401, 409-412, 440-441, 445, 450, 456-458,
 * 463-464, 470-471, 473, 500, 515, 517-519, 552, 564, 570-571, 577,
 * 579-580, 586, 594, 601, 607, 612, 639, 650, 653, 657, 675, 683-684,
 * 694-695, 697, 752, 760, 797, 806, 885, 904, 916, 918). Several if()
 * headers, case labels, av_log calls and helper calls are therefore
 * absent below — verify every section against upstream mpegvideo_enc.c
 * before editing.
 */
311 {
313  AVCPBProperties *cpb_props;
314  int i, ret;
315  int mb_array_size, mv_table_size;
316 
318 
 /* Chroma subsampling is derived from the input pixel format. */
319  switch (avctx->pix_fmt) {
320  case AV_PIX_FMT_YUVJ444P:
321  case AV_PIX_FMT_YUV444P:
322  s->chroma_format = CHROMA_444;
323  break;
324  case AV_PIX_FMT_YUVJ422P:
325  case AV_PIX_FMT_YUV422P:
326  s->chroma_format = CHROMA_422;
327  break;
328  case AV_PIX_FMT_YUVJ420P:
329  case AV_PIX_FMT_YUV420P:
330  default:
331  s->chroma_format = CHROMA_420;
332  break;
333  }
334 
336 
337  s->bit_rate = avctx->bit_rate;
338  s->width = avctx->width;
339  s->height = avctx->height;
 /* Keyframe interval is capped at 600 frames. */
340  if (avctx->gop_size > 600 &&
343  "keyframe interval too large!, reducing it from %d to %d\n",
344  avctx->gop_size, 600);
345  avctx->gop_size = 600;
346  }
347  s->gop_size = avctx->gop_size;
348  s->avctx = avctx;
350  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
351  "is %d.\n", MAX_B_FRAMES);
353  } else if (avctx->max_b_frames < 0) {
355  "max b frames must be 0 or positive for mpegvideo based encoders\n");
356  return AVERROR(EINVAL);
357  }
358  s->max_b_frames = avctx->max_b_frames;
359  s->codec_id = avctx->codec->id;
360  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
361  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
362  return AVERROR(EINVAL);
363  }
364 
365  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
366  s->rtp_mode = !!s->rtp_payload_size;
367  s->intra_dc_precision = avctx->intra_dc_precision;
368 
369  // workaround some differences between how applications specify dc precision
370  if (s->intra_dc_precision < 0) {
371  s->intra_dc_precision += 8;
372  } else if (s->intra_dc_precision >= 8)
373  s->intra_dc_precision -= 8;
374 
375  if (s->intra_dc_precision < 0) {
377  "intra dc precision must be positive, note some applications use"
378  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
379  return AVERROR(EINVAL);
380  }
381 
382  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
383  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
384  return AVERROR(EINVAL);
385  }
386  s->user_specified_pts = AV_NOPTS_VALUE;
387 
388  if (s->gop_size <= 1) {
389  s->intra_only = 1;
390  s->gop_size = 12;
391  } else {
392  s->intra_only = 0;
393  }
394 
395  /* Fixed QSCALE */
396  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
397 
 /* Adaptive quantization is enabled by any masking option unless the
  * quantizer is fixed. */
398  s->adaptive_quant = (avctx->lumi_masking ||
399  avctx->dark_masking ||
402  avctx->p_masking ||
403  s->border_masking ||
404  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
405  !s->fixed_qscale;
406 
407  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
408 
 /* Auto-pick a VBV buffer size per codec when max_rate is set but no
  * buffer size was given (surrounding if-header missing from listing). */
410  switch(avctx->codec_id) {
413  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
414  break;
415  case AV_CODEC_ID_MPEG4:
419  if (avctx->rc_max_rate >= 15000000) {
420  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
421  } else if(avctx->rc_max_rate >= 2000000) {
422  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
423  } else if(avctx->rc_max_rate >= 384000) {
424  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
425  } else
426  avctx->rc_buffer_size = 40;
427  avctx->rc_buffer_size *= 16384;
428  break;
429  }
430  if (avctx->rc_buffer_size) {
431  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
432  }
433  }
434 
 /* Rate-control sanity checks. */
435  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
436  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
437  return AVERROR(EINVAL);
438  }
439 
442  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
443  }
444 
446  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
447  return AVERROR(EINVAL);
448  }
449 
451  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
452  return AVERROR(EINVAL);
453  }
454 
455  if (avctx->rc_max_rate &&
459  "impossible bitrate constraints, this will fail\n");
460  }
461 
462  if (avctx->rc_buffer_size &&
465  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
466  return AVERROR(EINVAL);
467  }
468 
469  if (!s->fixed_qscale &&
472  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
474  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
475  if (nbt <= INT_MAX) {
476  avctx->bit_rate_tolerance = nbt;
477  } else
478  avctx->bit_rate_tolerance = INT_MAX;
479  }
480 
481  if (avctx->rc_max_rate &&
483  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
484  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
485  90000LL * (avctx->rc_buffer_size - 1) >
486  avctx->rc_max_rate * 0xFFFFLL) {
488  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
489  "specified vbv buffer is too large for the given bitrate!\n");
490  }
491 
 /* Feature/codec compatibility checks. */
492  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
493  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
494  s->codec_id != AV_CODEC_ID_FLV1) {
495  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
496  return AVERROR(EINVAL);
497  }
498 
499  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
501  "OBMC is only supported with simple mb decision\n");
502  return AVERROR(EINVAL);
503  }
504 
505  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
506  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
507  return AVERROR(EINVAL);
508  }
509 
510  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511  s->codec_id == AV_CODEC_ID_H263 ||
512  s->codec_id == AV_CODEC_ID_H263P) &&
513  (avctx->sample_aspect_ratio.num > 255 ||
514  avctx->sample_aspect_ratio.den > 255)) {
516  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
520  }
521 
522  if ((s->codec_id == AV_CODEC_ID_H263 ||
523  s->codec_id == AV_CODEC_ID_H263P) &&
524  (avctx->width > 2048 ||
525  avctx->height > 1152 )) {
526  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527  return AVERROR(EINVAL);
528  }
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P ||
531  s->codec_id == AV_CODEC_ID_RV20) &&
532  ((avctx->width &3) ||
533  (avctx->height&3) )) {
534  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
535  return AVERROR(EINVAL);
536  }
537 
538  if (s->codec_id == AV_CODEC_ID_RV10 &&
539  (avctx->width &15 ||
540  avctx->height&15 )) {
541  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
546  s->codec_id == AV_CODEC_ID_WMV2) &&
547  avctx->width & 1) {
548  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
549  return AVERROR(EINVAL);
550  }
551 
553  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557 
558  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
559  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
565  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
566  return AVERROR(EINVAL);
567  }
568 
569  if (s->scenechange_threshold < 1000000000 &&
572  "closed gop with scene change detection are not supported yet, "
573  "set threshold to 1000000000\n");
574  return AVERROR_PATCHWELCOME;
575  }
576 
578  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
581  "low delay forcing is only available for mpeg2, "
582  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
583  return AVERROR(EINVAL);
584  }
585  if (s->max_b_frames != 0) {
587  "B-frames cannot be used with low delay\n");
588  return AVERROR(EINVAL);
589  }
590  }
591 
592  if (s->q_scale_type == 1) {
593  if (avctx->qmax > 28) {
595  "non linear quant only supports qmax <= 28 currently\n");
596  return AVERROR_PATCHWELCOME;
597  }
598  }
599 
600  if (avctx->slices > 1 &&
602  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
603  return AVERROR(EINVAL);
604  }
605 
606  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
608  "notice: b_frame_strategy only affects the first pass\n");
609  s->b_frame_strategy = 0;
610  }
611 
 /* Reduce the time base by its GCD (computed on the missing line 612). */
613  if (i > 1) {
614  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
615  avctx->time_base.den /= i;
616  avctx->time_base.num /= i;
617  //return -1;
618  }
619 
 /* Quantizer rounding bias depends on the codec family. */
620  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
621  // (a + x * 3 / 8) / x
622  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
623  s->inter_quant_bias = 0;
624  } else {
625  s->intra_quant_bias = 0;
626  // (a - x / 4) / x
627  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
628  }
629 
630  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
631  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
632  return AVERROR(EINVAL);
633  }
634 
635  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
636 
637  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
638  avctx->time_base.den > (1 << 16) - 1) {
640  "timebase %d/%d not supported by MPEG 4 standard, "
641  "the maximum admitted value for the timebase denominator "
642  "is %d\n", avctx->time_base.num, avctx->time_base.den,
643  (1 << 16) - 1);
644  return AVERROR(EINVAL);
645  }
646  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
647 
 /* Per-codec output-format and delay configuration. */
648  switch (avctx->codec->id) {
649 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
651  s->rtp_mode = 1;
652  /* fallthrough */
654  s->out_format = FMT_MPEG1;
655  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
656  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
658  break;
659 #endif
660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
661  case AV_CODEC_ID_MJPEG:
662  case AV_CODEC_ID_AMV:
663  s->out_format = FMT_MJPEG;
664  s->intra_only = 1; /* force intra only for jpeg */
665  if ((ret = ff_mjpeg_encode_init(s)) < 0)
666  return ret;
667  avctx->delay = 0;
668  s->low_delay = 1;
669  break;
670 #endif
671  case AV_CODEC_ID_SPEEDHQ:
672  s->out_format = FMT_SPEEDHQ;
673  s->intra_only = 1; /* force intra only for SHQ */
674  if (!CONFIG_SPEEDHQ_ENCODER)
676  if ((ret = ff_speedhq_encode_init(s)) < 0)
677  return ret;
678  avctx->delay = 0;
679  s->low_delay = 1;
680  break;
681  case AV_CODEC_ID_H261:
682  if (!CONFIG_H261_ENCODER)
685  if (ret < 0)
686  return ret;
687  s->out_format = FMT_H261;
688  avctx->delay = 0;
689  s->low_delay = 1;
690  s->rtp_mode = 0; /* Sliced encoding not supported */
691  break;
692  case AV_CODEC_ID_H263:
693  if (!CONFIG_H263_ENCODER)
696  s->width, s->height) == 8) {
698  "The specified picture size of %dx%d is not valid for "
699  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
700  "352x288, 704x576, and 1408x1152. "
701  "Try H.263+.\n", s->width, s->height);
702  return AVERROR(EINVAL);
703  }
704  s->out_format = FMT_H263;
705  avctx->delay = 0;
706  s->low_delay = 1;
707  break;
708  case AV_CODEC_ID_H263P:
709  s->out_format = FMT_H263;
710  s->h263_plus = 1;
711  /* Fx */
712  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
713  s->modified_quant = s->h263_aic;
714  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
715  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
716 
717  /* /Fx */
718  /* These are just to be sure */
719  avctx->delay = 0;
720  s->low_delay = 1;
721  break;
722  case AV_CODEC_ID_FLV1:
723  s->out_format = FMT_H263;
724  s->h263_flv = 2; /* format = 1; 11-bit codes */
725  s->unrestricted_mv = 1;
726  s->rtp_mode = 0; /* don't allow GOB */
727  avctx->delay = 0;
728  s->low_delay = 1;
729  break;
730  case AV_CODEC_ID_RV10:
731  s->out_format = FMT_H263;
732  avctx->delay = 0;
733  s->low_delay = 1;
734  break;
735  case AV_CODEC_ID_RV20:
736  s->out_format = FMT_H263;
737  avctx->delay = 0;
738  s->low_delay = 1;
739  s->modified_quant = 1;
740  s->h263_aic = 1;
741  s->h263_plus = 1;
742  s->loop_filter = 1;
743  s->unrestricted_mv = 0;
744  break;
745  case AV_CODEC_ID_MPEG4:
746  s->out_format = FMT_H263;
747  s->h263_pred = 1;
748  s->unrestricted_mv = 1;
749  s->low_delay = s->max_b_frames ? 0 : 1;
750  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
751  break;
753  s->out_format = FMT_H263;
754  s->h263_pred = 1;
755  s->unrestricted_mv = 1;
756  s->msmpeg4_version = 2;
757  avctx->delay = 0;
758  s->low_delay = 1;
759  break;
761  s->out_format = FMT_H263;
762  s->h263_pred = 1;
763  s->unrestricted_mv = 1;
764  s->msmpeg4_version = 3;
765  s->flipflop_rounding = 1;
766  avctx->delay = 0;
767  s->low_delay = 1;
768  break;
769  case AV_CODEC_ID_WMV1:
770  s->out_format = FMT_H263;
771  s->h263_pred = 1;
772  s->unrestricted_mv = 1;
773  s->msmpeg4_version = 4;
774  s->flipflop_rounding = 1;
775  avctx->delay = 0;
776  s->low_delay = 1;
777  break;
778  case AV_CODEC_ID_WMV2:
779  s->out_format = FMT_H263;
780  s->h263_pred = 1;
781  s->unrestricted_mv = 1;
782  s->msmpeg4_version = 5;
783  s->flipflop_rounding = 1;
784  avctx->delay = 0;
785  s->low_delay = 1;
786  break;
787  default:
788  return AVERROR(EINVAL);
789  }
790 
791  avctx->has_b_frames = !s->low_delay;
792 
793  s->encoding = 1;
794 
795  s->progressive_frame =
796  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
798  s->alternate_scan);
799 
800  if (s->lmin > s->lmax) {
801  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
802  s->lmin = s->lmax;
803  }
804 
805  /* init */
807  if ((ret = ff_mpv_common_init(s)) < 0)
808  return ret;
809 
810  ff_fdctdsp_init(&s->fdsp, avctx);
811  ff_me_cmp_init(&s->mecc, avctx);
812  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
813  ff_pixblockdsp_init(&s->pdsp, avctx);
814 
 /* Allocate the per-qscale quantization tables and picture queues. */
815  if (!(avctx->stats_out = av_mallocz(256)) ||
816  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
817  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
818  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
819  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
820  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
821  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
822  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
823  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
824  !(s->new_picture = av_frame_alloc()))
825  return AVERROR(ENOMEM);
826 
827  /* Allocate MV tables; the MV and MB tables will be copied
828  * to slice contexts by ff_update_duplicate_context(). */
829  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
830  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
831  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
832  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
833  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
834  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
835  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
836  return AVERROR(ENOMEM);
837  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
838  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
839  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
840  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
841  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
842  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
843 
844  /* Allocate MB type table */
845  mb_array_size = s->mb_stride * s->mb_height;
846  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
847  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
848  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
849  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
850  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
851  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
852  !(s->mb_mean = av_mallocz(mb_array_size)))
853  return AVERROR(ENOMEM);
854 
855 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
 /* Field-MV tables, only needed for MPEG-4 or interlaced ME. */
856  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
857  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
858  int16_t (*tmp1)[2];
859  uint8_t *tmp2;
860  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
861  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
862  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
863  return AVERROR(ENOMEM);
864 
865  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
866  tmp1 += s->mb_stride + 1;
867 
868  for (int i = 0; i < 2; i++) {
869  for (int j = 0; j < 2; j++) {
870  for (int k = 0; k < 2; k++) {
871  s->b_field_mv_table[i][j][k] = tmp1;
872  tmp1 += mv_table_size;
873  }
874  s->b_field_select_table[i][j] = tmp2;
875  tmp2 += 2 * mv_table_size;
876  }
877  }
878  }
879 
880  if (s->noise_reduction) {
881  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
882  return AVERROR(ENOMEM);
883  }
884 
886 
 /* Select the unquantize functions matching the output format. */
887  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
888  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
889  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
890  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
891  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
892  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
893  } else {
894  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
895  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
896  }
897 
898  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
899  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
900 
901  if (s->slice_context_count > 1) {
902  s->rtp_mode = 1;
903 
905  s->h263_slice_structured = 1;
906  }
907 
908  s->quant_precision = 5;
909 
910  ret = ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
911  ret |= ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
912  if (ret < 0)
913  return AVERROR(EINVAL);
914 
915  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
917  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
919  }
920 
921  /* init q matrix */
922  for (i = 0; i < 64; i++) {
923  int j = s->idsp.idct_permutation[i];
924  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
925  s->mpeg_quant) {
926  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
927  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
928  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
929  s->intra_matrix[j] =
930  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
931  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
932  s->intra_matrix[j] =
933  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
934  } else {
935  /* MPEG-1/2 */
936  s->chroma_intra_matrix[j] =
937  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
938  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
939  }
940  if (avctx->intra_matrix)
941  s->intra_matrix[j] = avctx->intra_matrix[i];
942  if (avctx->inter_matrix)
943  s->inter_matrix[j] = avctx->inter_matrix[i];
944  }
945 
946  /* precompute matrix */
947  /* for mjpeg, we do include qscale in the matrix */
948  if (s->out_format != FMT_MJPEG) {
949  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
950  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
951  31, 1);
952  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
953  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
954  31, 0);
955  }
956 
957  if ((ret = ff_rate_control_init(s)) < 0)
958  return ret;
959 
 /* b_frame_strategy 2 needs downscaled scratch frames for lookahead. */
960  if (s->b_frame_strategy == 2) {
961  for (i = 0; i < s->max_b_frames + 2; i++) {
962  s->tmp_frames[i] = av_frame_alloc();
963  if (!s->tmp_frames[i])
964  return AVERROR(ENOMEM);
965 
966  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
967  s->tmp_frames[i]->width = s->width >> s->brd_scale;
968  s->tmp_frames[i]->height = s->height >> s->brd_scale;
969 
970  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
971  if (ret < 0)
972  return ret;
973  }
974  }
975 
 /* Advertise the coded picture buffer parameters as side data. */
976  cpb_props = ff_encode_add_cpb_side_data(avctx);
977  if (!cpb_props)
978  return AVERROR(ENOMEM);
979  cpb_props->max_bitrate = avctx->rc_max_rate;
980  cpb_props->min_bitrate = avctx->rc_min_rate;
981  cpb_props->avg_bitrate = avctx->bit_rate;
982  cpb_props->buffer_size = avctx->rc_buffer_size;
983 
984  return 0;
985 }
986 
988 {
990  int i;
991 
993 
995 
996  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
997  av_frame_free(&s->tmp_frames[i]);
998 
999  av_frame_free(&s->new_picture);
1000 
1002 
1003  av_freep(&s->p_mv_table_base);
1004  av_freep(&s->b_forw_mv_table_base);
1005  av_freep(&s->b_back_mv_table_base);
1006  av_freep(&s->b_bidir_forw_mv_table_base);
1007  av_freep(&s->b_bidir_back_mv_table_base);
1008  av_freep(&s->b_direct_mv_table_base);
1009  av_freep(&s->b_field_mv_table_base);
1010  av_freep(&s->b_field_select_table[0][0]);
1011  av_freep(&s->p_field_select_table[0]);
1012 
1013  av_freep(&s->mb_type);
1014  av_freep(&s->lambda_table);
1015 
1016  av_freep(&s->cplx_tab);
1017  av_freep(&s->bits_tab);
1018 
1019  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1020  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1021  s->q_chroma_intra_matrix= NULL;
1022  s->q_chroma_intra_matrix16= NULL;
1023  av_freep(&s->q_intra_matrix);
1024  av_freep(&s->q_inter_matrix);
1025  av_freep(&s->q_intra_matrix16);
1026  av_freep(&s->q_inter_matrix16);
1027  av_freep(&s->input_picture);
1028  av_freep(&s->reordered_input_picture);
1029  av_freep(&s->dct_offset);
1030  av_freep(&s->mb_var);
1031  av_freep(&s->mc_mb_var);
1032  av_freep(&s->mb_mean);
1033 
1034  return 0;
1035 }
1036 
1037 #define IS_ENCODER 1
1039 
/*
 * Reconstruct the current macroblock (encoder-side decode for reference
 * frames), optionally dumping the DCT coefficients when FF_DEBUG_DCT_COEFF
 * is set.
 * NOTE(review): doxygen extraction — listing line 1054 (the actual
 * reconstruction call, presumably ff_mpv_reconstruct_mb_internal or
 * similar) is missing; verify upstream.
 */
1040 static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
1041 {
1042  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1043  /* print DCT coefficients */
1044  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1045  for (int i = 0; i < 6; i++) {
1046  for (int j = 0; j < 64; j++) {
1047  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1048  block[i][s->idsp.idct_permutation[j]]);
1049  }
1050  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1051  }
1052  }
1053 
1055 }
1056 
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value.
 *
 * @param src    top-left corner of the 16x16 block
 * @param ref    constant value every pixel is compared against
 * @param stride distance in bytes between consecutive rows of src
 * @return sum over all 256 pixels of |src[x,y] - ref|
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
1070 
1071 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1072  const uint8_t *ref, int stride)
1073 {
1074  int x, y, w, h;
1075  int acc = 0;
1076 
1077  w = s->width & ~15;
1078  h = s->height & ~15;
1079 
1080  for (y = 0; y < h; y += 16) {
1081  for (x = 0; x < w; x += 16) {
1082  int offset = x + y * stride;
1083  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1084  stride, 16);
1085  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1086  int sae = get_sae(src + offset, mean, stride);
1087 
1088  acc += sae + 500 < sad;
1089  }
1090  }
1091  return acc;
1092 }
1093 
/* Allocate an edge-padded encoder-side picture. The frame is allocated
 * EDGE_WIDTH larger on each side, the data pointers are then advanced past
 * the border so pic->f->data[] points at the visible area, and the final
 * dimensions are restored before the Picture bookkeeping is allocated.
 * NOTE(review): the signature (extracted doc line 1094) was lost in
 * extraction — presumably alloc_picture(MpegEncContext *s, Picture *pic);
 * confirm against the complete file. */
1095 {
1096  AVCodecContext *avctx = s->avctx;
1097  int ret;
1098 
 /* temporarily enlarge so ff_encode_alloc_frame() reserves the border */
1099  pic->f->width = avctx->width + 2 * EDGE_WIDTH;
1100  pic->f->height = avctx->height + 2 * EDGE_WIDTH;
1101 
1102  ret = ff_encode_alloc_frame(avctx, pic->f);
1103  if (ret < 0)
1104  return ret;
1105 
 /* skip the top-left border; chroma planes use the chroma shifts */
1106  for (int i = 0; pic->f->data[i]; i++) {
1107  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
1108  pic->f->linesize[i] +
1109  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
1110  pic->f->data[i] += offset;
1111  }
1112  pic->f->width = avctx->width;
1113  pic->f->height = avctx->height;
1114 
1115  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 1, s->out_format,
1116  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1117  &s->linesize, &s->uvlinesize);
1118 }
1119 
/* Take one user-supplied frame (or NULL on flush) into the encoder's
 * input_picture[] reorder buffer.
 *
 * With a frame: validates/derives its PTS, then either references the user
 * frame directly ("direct", when strides and alignment match) or copies it
 * into a freshly allocated padded picture, drawing bottom/right edge
 * padding as needed. With NULL: compacts the buffer so that
 * s->input_picture[0] holds the oldest pending picture.
 *
 * Returns 0 on success, a negative AVERROR on failure. */
1120 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1121 {
1122  Picture *pic = NULL;
1123  int64_t pts;
1124  int i, display_picture_number = 0, ret;
 /* how many frames we buffer before the first output picture */
1125  int encoding_delay = s->max_b_frames ? s->max_b_frames
1126  : (s->low_delay ? 0 : 1);
1127  int flush_offset = 1;
1128  int direct = 1;
1129 
1130  if (pic_arg) {
1131  pts = pic_arg->pts;
1132  display_picture_number = s->input_picture_number++;
1133 
1134  if (pts != AV_NOPTS_VALUE) {
 /* PTS must be strictly increasing */
1135  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1136  int64_t last = s->user_specified_pts;
1137 
1138  if (pts <= last) {
1139  av_log(s->avctx, AV_LOG_ERROR,
1140  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1141  pts, last);
1142  return AVERROR(EINVAL);
1143  }
1144 
1145  if (!s->low_delay && display_picture_number == 1)
1146  s->dts_delta = pts - last;
1147  }
1148  s->user_specified_pts = pts;
1149  } else {
 /* no PTS given: extrapolate from the previous one, or fall back
  * to the display picture number */
1150  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1151  s->user_specified_pts =
1152  pts = s->user_specified_pts + 1;
1153  av_log(s->avctx, AV_LOG_INFO,
1154  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1155  pts);
1156  } else {
1157  pts = display_picture_number;
1158  }
1159  }
1160 
 /* "direct" (zero-copy) mode requires matching strides, dimensions
  * that are multiples of 16, and sufficiently aligned data/stride */
1161  if (pic_arg->linesize[0] != s->linesize ||
1162  pic_arg->linesize[1] != s->uvlinesize ||
1163  pic_arg->linesize[2] != s->uvlinesize)
1164  direct = 0;
1165  if ((s->width & 15) || (s->height & 15))
1166  direct = 0;
1167  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1168  direct = 0;
1169  if (s->linesize & (STRIDE_ALIGN-1))
1170  direct = 0;
1171 
1172  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1173  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1174 
1175  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1176  if (i < 0)
1177  return i;
1178 
1179  pic = &s->picture[i];
1180  pic->reference = 3;
1181 
1182  if (direct) {
1183  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1184  return ret;
1185  pic->shared = 1;
1186  } else {
1187  ret = alloc_picture(s, pic);
1188  if (ret < 0)
1189  return ret;
1190  ret = av_frame_copy_props(pic->f, pic_arg);
1191  if (ret < 0) {
1192  ff_mpeg_unref_picture(pic);
1193  return ret;
1194  }
1195 
 /* copy the three planes, padding the bottom edge when the height
  * is not a multiple of the required vertical padding */
1196  for (int i = 0; i < 3; i++) {
1197  ptrdiff_t src_stride = pic_arg->linesize[i];
1198  ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
1199  int h_shift = i ? s->chroma_x_shift : 0;
1200  int v_shift = i ? s->chroma_y_shift : 0;
1201  int w = AV_CEIL_RSHIFT(s->width , h_shift);
1202  int h = AV_CEIL_RSHIFT(s->height, v_shift);
1203  const uint8_t *src = pic_arg->data[i];
1204  uint8_t *dst = pic->f->data[i];
1205  int vpad = 16;
1206 
 /* interlaced MPEG-2 aligns the coded height to 32 */
1207  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1208  && !s->progressive_sequence
1209  && FFALIGN(s->height, 32) - s->height > 16)
1210  vpad = 32;
1211 
1212  if (!s->avctx->rc_buffer_size)
1213  dst += INPLACE_OFFSET;
1214 
 /* equal strides: one memcpy covering all rows (last row only
  * up to w bytes); otherwise copy row by row */
1215  if (src_stride == dst_stride)
1216  memcpy(dst, src, src_stride * h - src_stride + w);
1217  else {
1218  int h2 = h;
1219  uint8_t *dst2 = dst;
1220  while (h2--) {
1221  memcpy(dst2, src, w);
1222  dst2 += dst_stride;
1223  src += src_stride;
1224  }
1225  }
1226  if ((s->width & 15) || (s->height & (vpad-1))) {
1227  s->mpvencdsp.draw_edges(dst, dst_stride,
1228  w, h,
1229  16 >> h_shift,
1230  vpad >> v_shift,
1231  EDGE_BOTTOM);
1232  }
1233  }
1234  emms_c();
1235  }
1236 
1237  pic->display_picture_number = display_picture_number;
1238  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1239  } else {
1240  /* Flushing: When we have not received enough input frames,
1241  * ensure s->input_picture[0] contains the first picture */
1242  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1243  if (s->input_picture[flush_offset])
1244  break;
1245 
1246  if (flush_offset <= 1)
1247  flush_offset = 1;
1248  else
1249  encoding_delay = encoding_delay - flush_offset + 1;
1250  }
1251 
1252  /* shift buffer entries */
1253  for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
1254  s->input_picture[i - flush_offset] = s->input_picture[i];
1255  for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
1256  s->input_picture[i] = NULL;
1257 
1258  s->input_picture[encoding_delay] = pic;
1259 
1260  return 0;
1261 }
1262 
1263 static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
1264 {
1265  int x, y, plane;
1266  int score = 0;
1267  int64_t score64 = 0;
1268 
1269  for (plane = 0; plane < 3; plane++) {
1270  const int stride = p->f->linesize[plane];
1271  const int bw = plane ? 1 : 2;
1272  for (y = 0; y < s->mb_height * bw; y++) {
1273  for (x = 0; x < s->mb_width * bw; x++) {
1274  int off = p->shared ? 0 : 16;
1275  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1276  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1277  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1278 
1279  switch (FFABS(s->frame_skip_exp)) {
1280  case 0: score = FFMAX(score, v); break;
1281  case 1: score += FFABS(v); break;
1282  case 2: score64 += v * (int64_t)v; break;
1283  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1284  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1285  }
1286  }
1287  }
1288  }
1289  emms_c();
1290 
1291  if (score)
1292  score64 = score;
1293  if (s->frame_skip_exp < 0)
1294  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1295  -1.0/s->frame_skip_exp);
1296 
1297  if (score64 < s->frame_skip_threshold)
1298  return 1;
1299  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1300  return 1;
1301  return 0;
1302 }
1303 
/* Encode one frame with a helper AVCodecContext and return the total size
 * in bytes of all packets produced (or a negative AVERROR).
 * NOTE(review): the signature and the send/receive calls (extracted doc
 * lines 1304, 1309, 1314, 1317) were lost in extraction — presumably
 * avcodec_send_frame() followed by an avcodec_receive_packet() drain loop;
 * confirm against the complete file. */
1305 {
1306  int ret;
1307  int size = 0;
1308 
1310  if (ret < 0)
1311  return ret;
1312 
 /* drain all available packets, summing their sizes */
1313  do {
1315  if (ret >= 0) {
1316  size += pkt->size;
 /* EAGAIN/EOF terminate the loop normally; other errors propagate */
1318  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1319  return ret;
1320  } while (ret >= 0);
1321 
1322  return size;
1323 }
1324 
/* b_frame_strategy == 2: estimate the best number of B-frames by trial
 * encoding downscaled versions of the buffered input pictures with every
 * candidate B-frame count and picking the one with the lowest rate-
 * distortion score.
 * NOTE(review): the signature and several lines (extracted doc lines 1325,
 * 1350, 1391, 1399, 1429, 1442, 1457/1458) were lost in extraction —
 * confirm the helper-context allocation, the P/B pict_type assignment and
 * the flush call against the complete file.
 * Returns the best B-frame count, or a negative AVERROR on failure. */
1326 {
1327  AVPacket *pkt;
1328  const int scale = s->brd_scale;
1329  int width = s->width >> scale;
1330  int height = s->height >> scale;
1331  int i, j, out_size, p_lambda, b_lambda, lambda2;
1332  int64_t best_rd = INT64_MAX;
1333  int best_b_count = -1;
1334  int ret = 0;
1335 
1336  av_assert0(scale >= 0 && scale <= 3);
1337 
1338  pkt = av_packet_alloc();
1339  if (!pkt)
1340  return AVERROR(ENOMEM);
1341 
1342  //emms_c();
1343  //s->next_picture_ptr->quality;
1344  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1345  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1346  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1347  if (!b_lambda) // FIXME we should do this somewhere else
1348  b_lambda = p_lambda;
1349  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1351 
 /* downscale reference + buffered inputs into tmp_frames[] */
1352  for (i = 0; i < s->max_b_frames + 2; i++) {
1353  const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
1354  s->next_picture_ptr;
1355 
1356  if (pre_input_ptr) {
1357  const uint8_t *data[4];
1358  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1359 
 /* non-shared input pictures carry the in-place offset */
1360  if (!pre_input_ptr->shared && i) {
1361  data[0] += INPLACE_OFFSET;
1362  data[1] += INPLACE_OFFSET;
1363  data[2] += INPLACE_OFFSET;
1364  }
1365 
1366  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1367  s->tmp_frames[i]->linesize[0],
1368  data[0],
1369  pre_input_ptr->f->linesize[0],
1370  width, height);
1371  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1372  s->tmp_frames[i]->linesize[1],
1373  data[1],
1374  pre_input_ptr->f->linesize[1],
1375  width >> 1, height >> 1);
1376  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1377  s->tmp_frames[i]->linesize[2],
1378  data[2],
1379  pre_input_ptr->f->linesize[2],
1380  width >> 1, height >> 1);
1381  }
1382  }
1383 
 /* trial-encode with j B-frames for each candidate j */
1384  for (j = 0; j < s->max_b_frames + 1; j++) {
1385  AVCodecContext *c;
1386  int64_t rd = 0;
1387 
1388  if (!s->input_picture[j])
1389  break;
1390 
1392  if (!c) {
1393  ret = AVERROR(ENOMEM);
1394  goto fail;
1395  }
1396 
1397  c->width = width;
1398  c->height = height;
1400  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1401  c->mb_decision = s->avctx->mb_decision;
1402  c->me_cmp = s->avctx->me_cmp;
1403  c->mb_cmp = s->avctx->mb_cmp;
1404  c->me_sub_cmp = s->avctx->me_sub_cmp;
1405  c->pix_fmt = AV_PIX_FMT_YUV420P;
1406  c->time_base = s->avctx->time_base;
1407  c->max_b_frames = s->max_b_frames;
1408 
1409  ret = avcodec_open2(c, s->avctx->codec, NULL);
1410  if (ret < 0)
1411  goto fail;
1412 
1413 
1414  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1415  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1416 
1417  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1418  if (out_size < 0) {
1419  ret = out_size;
1420  goto fail;
1421  }
1422 
1423  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1424 
1425  for (i = 0; i < s->max_b_frames + 1; i++) {
 /* every (j+1)-th frame (and the last one) is a P-frame */
1426  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1427 
1428  s->tmp_frames[i + 1]->pict_type = is_p ?
1430  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1431 
1432  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1433  if (out_size < 0) {
1434  ret = out_size;
1435  goto fail;
1436  }
1437 
1438  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1439  }
1440 
1441  /* get the delayed frames */
1443  if (out_size < 0) {
1444  ret = out_size;
1445  goto fail;
1446  }
1447  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1448 
 /* add the accumulated distortion reported by the helper encoder */
1449  rd += c->error[0] + c->error[1] + c->error[2];
1450 
1451  if (rd < best_rd) {
1452  best_rd = rd;
1453  best_b_count = j;
1454  }
1455 
1456 fail:
1459  if (ret < 0) {
1460  best_b_count = ret;
1461  break;
1462  }
1463  }
1464 
1465  av_packet_free(&pkt);
1466 
1467  return best_b_count;
1468 }
1469 
/* Pick the next picture to encode and reorder the buffered inputs into
 * coding order (I/P first, then the B-frames that precede it in display
 * order), applying frame-skip, the configured B-frame strategy and GOP
 * constraints. The chosen picture ends up referenced in s->new_picture and
 * as s->current_picture_ptr.
 * NOTE(review): the signature and a few lines (extracted doc lines 1470,
 * 1527, 1590, 1663) were lost in extraction — confirm against the complete
 * file. Returns 0 on success, a negative AVERROR on failure. */
1471 {
1472  int i, ret;
1473 
 /* shift the reorder buffer by one */
1474  for (int i = 1; i <= MAX_B_FRAMES; i++)
1475  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1476  s->reordered_input_picture[MAX_B_FRAMES] = NULL;
1477 
1478  /* set next picture type & ordering */
1479  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1480  if (s->frame_skip_threshold || s->frame_skip_factor) {
1481  if (s->picture_in_gop_number < s->gop_size &&
1482  s->next_picture_ptr &&
1483  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1484  // FIXME check that the gop check above is +-1 correct
1485  av_frame_unref(s->input_picture[0]->f);
1486 
1487  ff_vbv_update(s, 0);
1488 
1489  goto no_output_pic;
1490  }
1491  }
1492 
 /* no reference yet (or intra-only): emit an I-frame immediately */
1493  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1494  !s->next_picture_ptr || s->intra_only) {
1495  s->reordered_input_picture[0] = s->input_picture[0];
1496  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1497  s->reordered_input_picture[0]->coded_picture_number =
1498  s->coded_picture_number++;
1499  } else {
1500  int b_frames = 0;
1501 
 /* two-pass: take the picture types recorded in the first pass */
1502  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1503  for (i = 0; i < s->max_b_frames + 1; i++) {
1504  int pict_num = s->input_picture[0]->display_picture_number + i;
1505 
1506  if (pict_num >= s->rc_context.num_entries)
1507  break;
1508  if (!s->input_picture[i]) {
1509  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1510  break;
1511  }
1512 
1513  s->input_picture[i]->f->pict_type =
1514  s->rc_context.entry[pict_num].new_pict_type;
1515  }
1516  }
1517 
1518  if (s->b_frame_strategy == 0) {
 /* fixed strategy: always use max_b_frames (as available) */
1519  b_frames = s->max_b_frames;
1520  while (b_frames && !s->input_picture[b_frames])
1521  b_frames--;
1522  } else if (s->b_frame_strategy == 1) {
 /* heuristic: stop inserting B-frames once the intra-block score
  * between neighbours gets too high */
1523  for (i = 1; i < s->max_b_frames + 1; i++) {
1524  if (s->input_picture[i] &&
1525  s->input_picture[i]->b_frame_score == 0) {
1526  s->input_picture[i]->b_frame_score =
1528  s->input_picture[i ]->f->data[0],
1529  s->input_picture[i - 1]->f->data[0],
1530  s->linesize) + 1;
1531  }
1532  }
1533  for (i = 0; i < s->max_b_frames + 1; i++) {
1534  if (!s->input_picture[i] ||
1535  s->input_picture[i]->b_frame_score - 1 >
1536  s->mb_num / s->b_sensitivity)
1537  break;
1538  }
1539 
1540  b_frames = FFMAX(0, i - 1);
1541 
1542  /* reset scores */
1543  for (i = 0; i < b_frames + 1; i++) {
1544  s->input_picture[i]->b_frame_score = 0;
1545  }
1546  } else if (s->b_frame_strategy == 2) {
 /* exhaustive trial encoding of downscaled frames */
1547  b_frames = estimate_best_b_count(s);
1548  if (b_frames < 0) {
1549  ff_mpeg_unref_picture(s->input_picture[0]);
1550  return b_frames;
1551  }
1552  }
1553 
1554  emms_c();
1555 
 /* a forced non-B picture inside the span truncates the B run */
1556  for (i = b_frames - 1; i >= 0; i--) {
1557  int type = s->input_picture[i]->f->pict_type;
1558  if (type && type != AV_PICTURE_TYPE_B)
1559  b_frames = i;
1560  }
1561  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1562  b_frames == s->max_b_frames) {
1563  av_log(s->avctx, AV_LOG_ERROR,
1564  "warning, too many B-frames in a row\n");
1565  }
1566 
 /* GOP boundary handling: force an I-frame, or trim to the GOP */
1567  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1568  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1569  s->gop_size > s->picture_in_gop_number) {
1570  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1571  } else {
1572  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1573  b_frames = 0;
1574  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1575  }
1576  }
1577 
1578  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1579  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1580  b_frames--;
1581 
 /* reorder: the anchor (I/P) goes first, B-frames follow */
1582  s->reordered_input_picture[0] = s->input_picture[b_frames];
1583  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1584  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1585  s->reordered_input_picture[0]->coded_picture_number =
1586  s->coded_picture_number++;
1587  for (i = 0; i < b_frames; i++) {
1588  s->reordered_input_picture[i + 1] = s->input_picture[i];
1589  s->reordered_input_picture[i + 1]->f->pict_type =
1591  s->reordered_input_picture[i + 1]->coded_picture_number =
1592  s->coded_picture_number++;
1593  }
1594  }
1595  }
1596 no_output_pic:
1597  av_frame_unref(s->new_picture);
1598 
1599  if (s->reordered_input_picture[0]) {
 /* B-frames are never used as references */
1600  s->reordered_input_picture[0]->reference =
1601  s->reordered_input_picture[0]->f->pict_type !=
1602  AV_PICTURE_TYPE_B ? 3 : 0;
1603 
1604  if ((ret = av_frame_ref(s->new_picture,
1605  s->reordered_input_picture[0]->f)))
1606  goto fail;
1607 
1608  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1609  // input is a shared pix, so we can't modify it -> allocate a new
1610  // one & ensure that the shared one is reuseable
1611 
1612  Picture *pic;
1613  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1614  if (i < 0)
1615  return i;
1616  pic = &s->picture[i];
1617 
1618  pic->reference = s->reordered_input_picture[0]->reference;
1619  ret = alloc_picture(s, pic);
1620  if (ret < 0)
1621  goto fail;
1622 
1623  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1624  if (ret < 0) {
1625  ff_mpeg_unref_picture(pic);
1626  goto fail;
1627  }
1628  pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1629  pic->display_picture_number = s->reordered_input_picture[0]->display_picture_number;
1630 
1631  /* mark us unused / free shared pic */
1632  av_frame_unref(s->reordered_input_picture[0]->f);
1633  s->reordered_input_picture[0]->shared = 0;
1634 
1635  s->current_picture_ptr = pic;
1636  } else {
1637  // input is not a shared pix -> reuse buffer for current_pix
1638  s->current_picture_ptr = s->reordered_input_picture[0];
1639  for (i = 0; i < 4; i++) {
1640  if (s->new_picture->data[i])
1641  s->new_picture->data[i] += INPLACE_OFFSET;
1642  }
1643  }
1644  s->picture_number = s->current_picture_ptr->display_picture_number;
1645 
1646  }
1647  return 0;
1648 fail:
1649  ff_mpeg_unref_picture(s->reordered_input_picture[0]);
1650  return ret;
1651 }
1652 
/* Per-frame wrap-up after encoding: pad the edges of reference pictures
 * (needed for unrestricted motion vectors) and remember the picture type
 * and lambda for the next frame.
 * NOTE(review): the signature and the luma edge-width arguments (extracted
 * doc lines 1653, 1663) were lost in extraction — confirm against the
 * complete file. */
1654 {
 /* only reference (non-B) pictures of a non-intra-only stream get edges */
1655  if (s->unrestricted_mv &&
1656  s->current_picture.reference &&
1657  !s->intra_only) {
1658  int hshift = s->chroma_x_shift;
1659  int vshift = s->chroma_y_shift;
1660  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1661  s->current_picture.f->linesize[0],
1662  s->h_edge_pos, s->v_edge_pos,
1664  EDGE_TOP | EDGE_BOTTOM);
1665  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1666  s->current_picture.f->linesize[1],
1667  s->h_edge_pos >> hshift,
1668  s->v_edge_pos >> vshift,
1669  EDGE_WIDTH >> hshift,
1670  EDGE_WIDTH >> vshift,
1671  EDGE_TOP | EDGE_BOTTOM);
1672  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1673  s->current_picture.f->linesize[2],
1674  s->h_edge_pos >> hshift,
1675  s->v_edge_pos >> vshift,
1676  EDGE_WIDTH >> hshift,
1677  EDGE_WIDTH >> vshift,
1678  EDGE_TOP | EDGE_BOTTOM);
1679  }
1680 
1681  emms_c();
1682 
 /* remember type/lambda for rate control of the following pictures */
1683  s->last_pict_type = s->pict_type;
1684  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1685  if (s->pict_type!= AV_PICTURE_TYPE_B)
1686  s->last_non_b_pict_type = s->pict_type;
1687 }
1688 
/* Recompute the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, separately for intra and inter blocks. Counters
 * are halved once they exceed 2^16 so the statistics stay adaptive.
 * NOTE(review): the signature (extracted doc line 1689) was lost in
 * extraction — confirm against the complete file. */
1690 {
1691  int intra, i;
1692 
1693  for (intra = 0; intra < 2; intra++) {
 /* decay the accumulated statistics to avoid unbounded growth */
1694  if (s->dct_count[intra] > (1 << 16)) {
1695  for (i = 0; i < 64; i++) {
1696  s->dct_error_sum[intra][i] >>= 1;
1697  }
1698  s->dct_count[intra] >>= 1;
1699  }
1700 
 /* offset per coefficient, rounded; +1 guards against division by 0 */
1701  for (i = 0; i < 64; i++) {
1702  s->dct_offset[intra][i] = (s->noise_reduction *
1703  s->dct_count[intra] +
1704  s->dct_error_sum[intra][i] / 2) /
1705  (s->dct_error_sum[intra][i] + 1);
1706  }
1707  }
1708 }
1709 
/* Per-frame setup before encoding: rotate the last/next/current reference
 * picture pointers for the new picture type and refresh the noise-reduction
 * tables.
 * NOTE(review): the signature and the update_noise_reduction() call
 * (extracted doc lines 1710, 1750) were lost in extraction — confirm
 * against the complete file. Returns 0 on success, negative on error. */
1711 {
1712  int ret;
1713 
1714  /* mark & release old frames */
1715  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1716  s->last_picture_ptr != s->next_picture_ptr &&
1717  s->last_picture_ptr->f->buf[0]) {
1718  ff_mpeg_unref_picture(s->last_picture_ptr);
1719  }
1720 
1721  s->current_picture_ptr->f->pict_type = s->pict_type;
1722 
1723  ff_mpeg_unref_picture(&s->current_picture);
1724  if ((ret = ff_mpeg_ref_picture(&s->current_picture,
1725  s->current_picture_ptr)) < 0)
1726  return ret;
1727 
 /* non-B pictures become the new forward reference */
1728  if (s->pict_type != AV_PICTURE_TYPE_B) {
1729  s->last_picture_ptr = s->next_picture_ptr;
1730  s->next_picture_ptr = s->current_picture_ptr;
1731  }
1732 
1733  if (s->last_picture_ptr) {
1734  ff_mpeg_unref_picture(&s->last_picture);
1735  if (s->last_picture_ptr->f->buf[0] &&
1736  (ret = ff_mpeg_ref_picture(&s->last_picture,
1737  s->last_picture_ptr)) < 0)
1738  return ret;
1739  }
1740  if (s->next_picture_ptr) {
1741  ff_mpeg_unref_picture(&s->next_picture);
1742  if (s->next_picture_ptr->f->buf[0] &&
1743  (ret = ff_mpeg_ref_picture(&s->next_picture,
1744  s->next_picture_ptr)) < 0)
1745  return ret;
1746  }
1747 
1748  if (s->dct_error_sum) {
1749  av_assert2(s->noise_reduction && s->encoding);
1751  }
1752 
1753  return 0;
1754 }
1755 
/* Top-level per-frame encode entry point: buffers/reorders the input,
 * encodes the selected picture (re-encoding at higher lambda on VBV
 * overflow), writes stuffing, patches the MPEG-1/2 vbv_delay for CBR and
 * fills in the output packet metadata.
 * NOTE(review): the head of the signature and a number of lines (extracted
 * doc lines 1756, 1759, 1786, 1789, 1814, 1863, 1866/1867, 1874, 1877,
 * 1893/1894, 1917, 1926, 1933, 1957, 1978, 1985, 1987) were lost in
 * extraction — confirm against the complete file. */
1757  const AVFrame *pic_arg, int *got_packet)
1758 {
1760  int i, stuffing_count, ret;
1761  int context_count = s->slice_context_count;
1762 
1763  s->vbv_ignore_qmax = 0;
1764 
1765  s->picture_in_gop_number++;
1766 
1767  if (load_input_picture(s, pic_arg) < 0)
1768  return -1;
1769 
1770  if (select_input_picture(s) < 0) {
1771  return -1;
1772  }
1773 
1774  /* output? */
1775  if (s->new_picture->data[0]) {
 /* with a single slice context the packet buffer can be grown on
  * demand; otherwise allocate worst-case MAX_MB_BYTES per MB */
1776  int growing_buffer = context_count == 1 && !s->data_partitioning;
1777  size_t pkt_size = 10000 + s->mb_width * s->mb_height *
1778  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1779  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1780  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_picture, &pkt_size);
1781  if (ret < 0)
1782  return ret;
1783  }
1784  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1785  return ret;
1787  if (s->mb_info) {
1788  s->mb_info_ptr = av_packet_new_side_data(pkt,
1790  s->mb_width*s->mb_height*12);
1791  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1792  }
1793 
 /* split the packet buffer proportionally among the slice threads */
1794  for (i = 0; i < context_count; i++) {
1795  int start_y = s->thread_context[i]->start_mb_y;
1796  int end_y = s->thread_context[i]-> end_mb_y;
1797  int h = s->mb_height;
1798  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1799  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1800 
1801  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1802  }
1803 
1804  s->pict_type = s->new_picture->pict_type;
1805  //emms_c();
1806  ret = frame_start(s);
1807  if (ret < 0)
1808  return ret;
1809 vbv_retry:
1810  ret = encode_picture(s);
1811  if (growing_buffer) {
1812  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1813  pkt->data = s->pb.buf;
1815  }
1816  if (ret < 0)
1817  return -1;
1818 
1819  frame_end(s);
1820 
1821  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1822  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1823 
 /* VBV check: if the frame is too large, raise lambda and re-encode */
1824  if (avctx->rc_buffer_size) {
1825  RateControlContext *rcc = &s->rc_context;
1826  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1827  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1828  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1829 
1830  if (put_bits_count(&s->pb) > max_size &&
1831  s->lambda < s->lmax) {
1832  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1833  (s->qscale + 1) / s->qscale);
1834  if (s->adaptive_quant) {
1835  int i;
1836  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1837  s->lambda_table[i] =
1838  FFMAX(s->lambda_table[i] + min_step,
1839  s->lambda_table[i] * (s->qscale + 1) /
1840  s->qscale);
1841  }
1842  s->mb_skipped = 0; // done in frame_start()
1843  // done in encode_picture() so we must undo it
1844  if (s->pict_type == AV_PICTURE_TYPE_P) {
1845  if (s->flipflop_rounding ||
1846  s->codec_id == AV_CODEC_ID_H263P ||
1847  s->codec_id == AV_CODEC_ID_MPEG4)
1848  s->no_rounding ^= 1;
1849  }
1850  if (s->pict_type != AV_PICTURE_TYPE_B) {
1851  s->time_base = s->last_time_base;
1852  s->last_non_b_time = s->time - s->pp_time;
1853  }
 /* rewind all slice bitstream writers before retrying */
1854  for (i = 0; i < context_count; i++) {
1855  PutBitContext *pb = &s->thread_context[i]->pb;
1856  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1857  }
1858  s->vbv_ignore_qmax = 1;
1859  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1860  goto vbv_retry;
1861  }
1862 
1864  }
1865 
1868 
 /* accumulate per-plane encoding error for the stats API */
1869  for (i = 0; i < 4; i++) {
1870  avctx->error[i] += s->encoding_error[i];
1871  }
1872  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1873  s->encoding_error,
1875  s->pict_type);
1876 
1878  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1879  s->misc_bits + s->i_tex_bits +
1880  s->p_tex_bits);
1881  flush_put_bits(&s->pb);
1882  s->frame_bits = put_bits_count(&s->pb);
1883 
 /* rate control may request stuffing bytes to keep the VBV buffer full */
1884  stuffing_count = ff_vbv_update(s, s->frame_bits);
1885  s->stuffing_bits = 8*stuffing_count;
1886  if (stuffing_count) {
1887  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1888  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1889  return -1;
1890  }
1891 
1892  switch (s->codec_id) {
1895  while (stuffing_count--) {
1896  put_bits(&s->pb, 8, 0);
1897  }
1898  break;
1899  case AV_CODEC_ID_MPEG4:
 /* MPEG-4 stuffing: a filler start code followed by 0xFF bytes */
1900  put_bits(&s->pb, 16, 0);
1901  put_bits(&s->pb, 16, 0x1C3);
1902  stuffing_count -= 4;
1903  while (stuffing_count--) {
1904  put_bits(&s->pb, 8, 0xFF);
1905  }
1906  break;
1907  default:
1908  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1909  s->stuffing_bits = 0;
1910  }
1911  flush_put_bits(&s->pb);
1912  s->frame_bits = put_bits_count(&s->pb);
1913  }
1914 
1915  /* update MPEG-1/2 vbv_delay for CBR */
1916  if (avctx->rc_max_rate &&
1918  s->out_format == FMT_MPEG1 &&
1919  90000LL * (avctx->rc_buffer_size - 1) <=
1920  avctx->rc_max_rate * 0xFFFFLL) {
1921  AVCPBProperties *props;
1922  size_t props_size;
1923 
1924  int vbv_delay, min_delay;
1925  double inbits = avctx->rc_max_rate *
1927  int minbits = s->frame_bits - 8 *
1928  (s->vbv_delay_pos - 1);
1929  double bits = s->rc_context.buffer_index + minbits - inbits;
1930  uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;
1931 
1932  if (bits < 0)
1934  "Internal error, negative bits\n");
1935 
1936  av_assert1(s->repeat_first_field == 0);
1937 
1938  vbv_delay = bits * 90000 / avctx->rc_max_rate;
1939  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1940  avctx->rc_max_rate;
1941 
1942  vbv_delay = FFMAX(vbv_delay, min_delay);
1943 
1944  av_assert0(vbv_delay < 0xFFFF);
1945 
 /* patch the 16-bit vbv_delay field straddling three header bytes */
1946  vbv_delay_ptr[0] &= 0xF8;
1947  vbv_delay_ptr[0] |= vbv_delay >> 13;
1948  vbv_delay_ptr[1] = vbv_delay >> 5;
1949  vbv_delay_ptr[2] &= 0x07;
1950  vbv_delay_ptr[2] |= vbv_delay << 3;
1951 
1952  props = av_cpb_properties_alloc(&props_size);
1953  if (!props)
1954  return AVERROR(ENOMEM);
1955  props->vbv_delay = vbv_delay * 300;
1956 
1958  (uint8_t*)props, props_size);
1959  if (ret < 0) {
1960  av_freep(&props);
1961  return ret;
1962  }
1963  }
1964  s->total_bits += s->frame_bits;
1965 
 /* packet timestamps: for reordered streams DTS lags PTS by one anchor */
1966  pkt->pts = s->current_picture.f->pts;
1967  pkt->duration = s->current_picture.f->duration;
1968  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1969  if (!s->current_picture.coded_picture_number)
1970  pkt->dts = pkt->pts - s->dts_delta;
1971  else
1972  pkt->dts = s->reordered_pts;
1973  s->reordered_pts = pkt->pts;
1974  } else
1975  pkt->dts = pkt->pts;
1976 
1977  // the no-delay case is handled in generic code
1979  ret = ff_encode_reordered_opaque(avctx, pkt, s->current_picture.f);
1980  if (ret < 0)
1981  return ret;
1982  }
1983 
1984  if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
1986  if (s->mb_info)
1988  } else {
1989  s->frame_bits = 0;
1990  }
1991 
1992  /* release non-reference frames */
1993  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1994  if (!s->picture[i].reference)
1995  ff_mpeg_unref_picture(&s->picture[i]);
1996  }
1997 
1998  av_assert1((s->frame_bits & 7) == 0);
1999 
2000  pkt->size = s->frame_bits / 8;
2001  *got_packet = !!pkt->size;
2002  return 0;
2003 }
2004 
/* Zero out a block whose only non-zero coefficients are isolated +-1s, when
 * the run-length-weighted score of those coefficients stays below the given
 * threshold ("single coefficient elimination"). A negative threshold means
 * the DC coefficient takes part in the decision and may be cleared too.
 * NOTE(review): the head of the signature (extracted doc line 2005) was
 * lost in extraction — confirm the function name and the MpegEncContext
 * parameter against the complete file. */
2006  int n, int threshold)
2007 {
 /* score contribution indexed by the zero-run preceding a +-1 level */
2008  static const char tab[64] = {
2009  3, 2, 2, 1, 1, 1, 1, 1,
2010  1, 1, 1, 1, 1, 1, 1, 1,
2011  1, 1, 1, 1, 1, 1, 1, 1,
2012  0, 0, 0, 0, 0, 0, 0, 0,
2013  0, 0, 0, 0, 0, 0, 0, 0,
2014  0, 0, 0, 0, 0, 0, 0, 0,
2015  0, 0, 0, 0, 0, 0, 0, 0,
2016  0, 0, 0, 0, 0, 0, 0, 0
2017  };
2018  int score = 0;
2019  int run = 0;
2020  int i;
2021  int16_t *block = s->block[n];
2022  const int last_index = s->block_last_index[n];
2023  int skip_dc;
2024 
2025  if (threshold < 0) {
2026  skip_dc = 0;
2027  threshold = -threshold;
2028  } else
2029  skip_dc = 1;
2030 
2031  /* Are all we could set to zero already zero? */
2032  if (last_index <= skip_dc - 1)
2033  return;
2034 
 /* any |level| > 1 disqualifies the block from elimination */
2035  for (i = 0; i <= last_index; i++) {
2036  const int j = s->intra_scantable.permutated[i];
2037  const int level = FFABS(block[j]);
2038  if (level == 1) {
2039  if (skip_dc && i == 0)
2040  continue;
2041  score += tab[run];
2042  run = 0;
2043  } else if (level > 1) {
2044  return;
2045  } else {
2046  run++;
2047  }
2048  }
2049  if (score >= threshold)
2050  return;
 /* clear every eliminable coefficient (DC preserved when skip_dc) */
2051  for (i = skip_dc; i <= last_index; i++) {
2052  const int j = s->intra_scantable.permutated[i];
2053  block[j] = 0;
2054  }
2055  if (block[0])
2056  s->block_last_index[n] = 0;
2057  else
2058  s->block_last_index[n] = -1;
2059 }
2060 
2061 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2062  int last_index)
2063 {
2064  int i;
2065  const int maxlevel = s->max_qcoeff;
2066  const int minlevel = s->min_qcoeff;
2067  int overflow = 0;
2068 
2069  if (s->mb_intra) {
2070  i = 1; // skip clipping of intra dc
2071  } else
2072  i = 0;
2073 
2074  for (; i <= last_index; i++) {
2075  const int j = s->intra_scantable.permutated[i];
2076  int level = block[j];
2077 
2078  if (level > maxlevel) {
2079  level = maxlevel;
2080  overflow++;
2081  } else if (level < minlevel) {
2082  level = minlevel;
2083  overflow++;
2084  }
2085 
2086  block[j] = level;
2087  }
2088 
2089  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2090  av_log(s->avctx, AV_LOG_INFO,
2091  "warning, clipping %d dct coefficients to %d..%d\n",
2092  overflow, minlevel, maxlevel);
2093 }
2094 
/**
 * Compute a per-pixel visual masking weight for an 8x8 block.
 *
 * For every pixel, the local variance over its 3x3 neighbourhood (clipped
 * to the block) is estimated via count*sqr - sum*sum, and the weight is
 * 36*sqrt(...)/count — flat areas get low weights, textured areas high ones.
 *
 * @param weight output array of 64 weights (row-major 8x8)
 * @param ptr    top-left corner of the source 8x8 block
 * @param stride line size of the source plane
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            const int y0 = FFMAX(y - 1, 0), y1 = FFMIN(8, y + 2);
            const int x0 = FFMAX(x - 1, 0), x1 = FFMIN(8, x + 2);

            for (int yy = y0; yy < y1; yy++) {
                for (int xx = x0; xx < x1; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }

            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2118 
2120  int motion_x, int motion_y,
2121  int mb_block_height,
2122  int mb_block_width,
2123  int mb_block_count,
2124  int chroma_x_shift,
2125  int chroma_y_shift,
2126  int chroma_format)
2127 {
2128 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2129  * and neither of these encoders currently supports 444. */
2130 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2131  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2132  int16_t weight[12][64];
2133  int16_t orig[12][64];
2134  const int mb_x = s->mb_x;
2135  const int mb_y = s->mb_y;
2136  int i;
2137  int skip_dct[12];
2138  int dct_offset = s->linesize * 8; // default for progressive frames
2139  int uv_dct_offset = s->uvlinesize * 8;
2140  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2141  ptrdiff_t wrap_y, wrap_c;
2142 
2143  for (i = 0; i < mb_block_count; i++)
2144  skip_dct[i] = s->skipdct;
2145 
2146  if (s->adaptive_quant) {
2147  const int last_qp = s->qscale;
2148  const int mb_xy = mb_x + mb_y * s->mb_stride;
2149 
2150  s->lambda = s->lambda_table[mb_xy];
2151  update_qscale(s);
2152 
2153  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2154  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2155  s->dquant = s->qscale - last_qp;
2156 
2157  if (s->out_format == FMT_H263) {
2158  s->dquant = av_clip(s->dquant, -2, 2);
2159 
2160  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2161  if (!s->mb_intra) {
2162  if (s->pict_type == AV_PICTURE_TYPE_B) {
2163  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2164  s->dquant = 0;
2165  }
2166  if (s->mv_type == MV_TYPE_8X8)
2167  s->dquant = 0;
2168  }
2169  }
2170  }
2171  }
2172  ff_set_qscale(s, last_qp + s->dquant);
2173  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2174  ff_set_qscale(s, s->qscale + s->dquant);
2175 
2176  wrap_y = s->linesize;
2177  wrap_c = s->uvlinesize;
2178  ptr_y = s->new_picture->data[0] +
2179  (mb_y * 16 * wrap_y) + mb_x * 16;
2180  ptr_cb = s->new_picture->data[1] +
2181  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2182  ptr_cr = s->new_picture->data[2] +
2183  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2184 
2185  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2186  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2187  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2188  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2189  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2190  wrap_y, wrap_y,
2191  16, 16, mb_x * 16, mb_y * 16,
2192  s->width, s->height);
2193  ptr_y = ebuf;
2194  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2195  wrap_c, wrap_c,
2196  mb_block_width, mb_block_height,
2197  mb_x * mb_block_width, mb_y * mb_block_height,
2198  cw, ch);
2199  ptr_cb = ebuf + 16 * wrap_y;
2200  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2201  wrap_c, wrap_c,
2202  mb_block_width, mb_block_height,
2203  mb_x * mb_block_width, mb_y * mb_block_height,
2204  cw, ch);
2205  ptr_cr = ebuf + 16 * wrap_y + 16;
2206  }
2207 
2208  if (s->mb_intra) {
2209  if (INTERLACED_DCT(s)) {
2210  int progressive_score, interlaced_score;
2211 
2212  s->interlaced_dct = 0;
2213  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2214  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2215  NULL, wrap_y, 8) - 400;
2216 
2217  if (progressive_score > 0) {
2218  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2219  NULL, wrap_y * 2, 8) +
2220  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2221  NULL, wrap_y * 2, 8);
2222  if (progressive_score > interlaced_score) {
2223  s->interlaced_dct = 1;
2224 
2225  dct_offset = wrap_y;
2226  uv_dct_offset = wrap_c;
2227  wrap_y <<= 1;
2228  if (chroma_format == CHROMA_422 ||
2230  wrap_c <<= 1;
2231  }
2232  }
2233  }
2234 
2235  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2236  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2237  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2238  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2239 
2240  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2241  skip_dct[4] = 1;
2242  skip_dct[5] = 1;
2243  } else {
2244  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2245  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2246  if (chroma_format == CHROMA_422) {
2247  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2248  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2249  } else if (chroma_format == CHROMA_444) {
2250  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2251  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2252  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2253  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2254  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2255  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2256  }
2257  }
2258  } else {
2259  op_pixels_func (*op_pix)[4];
2260  qpel_mc_func (*op_qpix)[16];
2261  uint8_t *dest_y, *dest_cb, *dest_cr;
2262 
2263  dest_y = s->dest[0];
2264  dest_cb = s->dest[1];
2265  dest_cr = s->dest[2];
2266 
2267  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2268  op_pix = s->hdsp.put_pixels_tab;
2269  op_qpix = s->qdsp.put_qpel_pixels_tab;
2270  } else {
2271  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2272  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2273  }
2274 
2275  if (s->mv_dir & MV_DIR_FORWARD) {
2276  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2277  s->last_picture.f->data,
2278  op_pix, op_qpix);
2279  op_pix = s->hdsp.avg_pixels_tab;
2280  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2281  }
2282  if (s->mv_dir & MV_DIR_BACKWARD) {
2283  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2284  s->next_picture.f->data,
2285  op_pix, op_qpix);
2286  }
2287 
2288  if (INTERLACED_DCT(s)) {
2289  int progressive_score, interlaced_score;
2290 
2291  s->interlaced_dct = 0;
2292  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2293  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2294  ptr_y + wrap_y * 8,
2295  wrap_y, 8) - 400;
2296 
2297  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2298  progressive_score -= 400;
2299 
2300  if (progressive_score > 0) {
2301  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2302  wrap_y * 2, 8) +
2303  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2304  ptr_y + wrap_y,
2305  wrap_y * 2, 8);
2306 
2307  if (progressive_score > interlaced_score) {
2308  s->interlaced_dct = 1;
2309 
2310  dct_offset = wrap_y;
2311  uv_dct_offset = wrap_c;
2312  wrap_y <<= 1;
2313  if (chroma_format == CHROMA_422)
2314  wrap_c <<= 1;
2315  }
2316  }
2317  }
2318 
2319  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2320  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2321  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2322  dest_y + dct_offset, wrap_y);
2323  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2324  dest_y + dct_offset + 8, wrap_y);
2325 
2326  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2327  skip_dct[4] = 1;
2328  skip_dct[5] = 1;
2329  } else {
2330  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2331  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2332  if (!chroma_y_shift) { /* 422 */
2333  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2334  dest_cb + uv_dct_offset, wrap_c);
2335  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2336  dest_cr + uv_dct_offset, wrap_c);
2337  }
2338  }
2339  /* pre quantization */
2340  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2341  // FIXME optimize
2342  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2343  skip_dct[0] = 1;
2344  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2345  skip_dct[1] = 1;
2346  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2347  wrap_y, 8) < 20 * s->qscale)
2348  skip_dct[2] = 1;
2349  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2350  wrap_y, 8) < 20 * s->qscale)
2351  skip_dct[3] = 1;
2352  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2353  skip_dct[4] = 1;
2354  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2355  skip_dct[5] = 1;
2356  if (!chroma_y_shift) { /* 422 */
2357  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2358  dest_cb + uv_dct_offset,
2359  wrap_c, 8) < 20 * s->qscale)
2360  skip_dct[6] = 1;
2361  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2362  dest_cr + uv_dct_offset,
2363  wrap_c, 8) < 20 * s->qscale)
2364  skip_dct[7] = 1;
2365  }
2366  }
2367  }
2368 
2369  if (s->quantizer_noise_shaping) {
2370  if (!skip_dct[0])
2371  get_visual_weight(weight[0], ptr_y , wrap_y);
2372  if (!skip_dct[1])
2373  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2374  if (!skip_dct[2])
2375  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2376  if (!skip_dct[3])
2377  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2378  if (!skip_dct[4])
2379  get_visual_weight(weight[4], ptr_cb , wrap_c);
2380  if (!skip_dct[5])
2381  get_visual_weight(weight[5], ptr_cr , wrap_c);
2382  if (!chroma_y_shift) { /* 422 */
2383  if (!skip_dct[6])
2384  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2385  wrap_c);
2386  if (!skip_dct[7])
2387  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2388  wrap_c);
2389  }
2390  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2391  }
2392 
2393  /* DCT & quantize */
2394  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2395  {
2396  for (i = 0; i < mb_block_count; i++) {
2397  if (!skip_dct[i]) {
2398  int overflow;
2399  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2400  // FIXME we could decide to change to quantizer instead of
2401  // clipping
2402  // JS: I don't think that would be a good idea it could lower
2403  // quality instead of improve it. Just INTRADC clipping
2404  // deserves changes in quantizer
2405  if (overflow)
2406  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2407  } else
2408  s->block_last_index[i] = -1;
2409  }
2410  if (s->quantizer_noise_shaping) {
2411  for (i = 0; i < mb_block_count; i++) {
2412  if (!skip_dct[i]) {
2413  s->block_last_index[i] =
2414  dct_quantize_refine(s, s->block[i], weight[i],
2415  orig[i], i, s->qscale);
2416  }
2417  }
2418  }
2419 
2420  if (s->luma_elim_threshold && !s->mb_intra)
2421  for (i = 0; i < 4; i++)
2422  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2423  if (s->chroma_elim_threshold && !s->mb_intra)
2424  for (i = 4; i < mb_block_count; i++)
2425  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2426 
2427  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2428  for (i = 0; i < mb_block_count; i++) {
2429  if (s->block_last_index[i] == -1)
2430  s->coded_score[i] = INT_MAX / 256;
2431  }
2432  }
2433  }
2434 
2435  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2436  s->block_last_index[4] =
2437  s->block_last_index[5] = 0;
2438  s->block[4][0] =
2439  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2440  if (!chroma_y_shift) { /* 422 / 444 */
2441  for (i=6; i<12; i++) {
2442  s->block_last_index[i] = 0;
2443  s->block[i][0] = s->block[4][0];
2444  }
2445  }
2446  }
2447 
2448  // non c quantize code returns incorrect block_last_index FIXME
2449  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2450  for (i = 0; i < mb_block_count; i++) {
2451  int j;
2452  if (s->block_last_index[i] > 0) {
2453  for (j = 63; j > 0; j--) {
2454  if (s->block[i][s->intra_scantable.permutated[j]])
2455  break;
2456  }
2457  s->block_last_index[i] = j;
2458  }
2459  }
2460  }
2461 
2462  /* huffman encode */
2463  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2466  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2467  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2468  break;
2469  case AV_CODEC_ID_MPEG4:
2470  if (CONFIG_MPEG4_ENCODER)
2471  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2472  break;
2473  case AV_CODEC_ID_MSMPEG4V2:
2474  case AV_CODEC_ID_MSMPEG4V3:
2475  case AV_CODEC_ID_WMV1:
2476  if (CONFIG_MSMPEG4ENC)
2477  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2478  break;
2479  case AV_CODEC_ID_WMV2:
2480  if (CONFIG_WMV2_ENCODER)
2481  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2482  break;
2483  case AV_CODEC_ID_H261:
2484  if (CONFIG_H261_ENCODER)
2485  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2486  break;
2487  case AV_CODEC_ID_H263:
2488  case AV_CODEC_ID_H263P:
2489  case AV_CODEC_ID_FLV1:
2490  case AV_CODEC_ID_RV10:
2491  case AV_CODEC_ID_RV20:
2492  if (CONFIG_H263_ENCODER)
2493  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2494  break;
2495 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2496  case AV_CODEC_ID_MJPEG:
2497  case AV_CODEC_ID_AMV:
2498  ff_mjpeg_encode_mb(s, s->block);
2499  break;
2500 #endif
2501  case AV_CODEC_ID_SPEEDHQ:
2502  if (CONFIG_SPEEDHQ_ENCODER)
2503  ff_speedhq_encode_mb(s, s->block);
2504  break;
2505  default:
2506  av_assert1(0);
2507  }
2508 }
2509 
2510 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2511 {
2512  if (s->chroma_format == CHROMA_420)
2513  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2514  else if (s->chroma_format == CHROMA_422)
2515  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2516  else
2517  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2518 }
2519 
2521  const MpegEncContext *s)
2522 {
2523  int i;
2524 
2525  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2526 
2527  /* MPEG-1 */
2528  d->mb_skip_run= s->mb_skip_run;
2529  for(i=0; i<3; i++)
2530  d->last_dc[i] = s->last_dc[i];
2531 
2532  /* statistics */
2533  d->mv_bits= s->mv_bits;
2534  d->i_tex_bits= s->i_tex_bits;
2535  d->p_tex_bits= s->p_tex_bits;
2536  d->i_count= s->i_count;
2537  d->skip_count= s->skip_count;
2538  d->misc_bits= s->misc_bits;
2539  d->last_bits= 0;
2540 
2541  d->mb_skipped= 0;
2542  d->qscale= s->qscale;
2543  d->dquant= s->dquant;
2544 
2545  d->esc3_level_length= s->esc3_level_length;
2546 }
2547 
2549  const MpegEncContext *s)
2550 {
2551  int i;
2552 
2553  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2554  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2555 
2556  /* MPEG-1 */
2557  d->mb_skip_run= s->mb_skip_run;
2558  for(i=0; i<3; i++)
2559  d->last_dc[i] = s->last_dc[i];
2560 
2561  /* statistics */
2562  d->mv_bits= s->mv_bits;
2563  d->i_tex_bits= s->i_tex_bits;
2564  d->p_tex_bits= s->p_tex_bits;
2565  d->i_count= s->i_count;
2566  d->skip_count= s->skip_count;
2567  d->misc_bits= s->misc_bits;
2568 
2569  d->mb_intra= s->mb_intra;
2570  d->mb_skipped= s->mb_skipped;
2571  d->mv_type= s->mv_type;
2572  d->mv_dir= s->mv_dir;
2573  d->pb= s->pb;
2574  if(s->data_partitioning){
2575  d->pb2= s->pb2;
2576  d->tex_pb= s->tex_pb;
2577  }
2578  d->block= s->block;
2579  for(i=0; i<8; i++)
2580  d->block_last_index[i]= s->block_last_index[i];
2581  d->interlaced_dct= s->interlaced_dct;
2582  d->qscale= s->qscale;
2583 
2584  d->esc3_level_length= s->esc3_level_length;
2585 }
2586 
2587 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best,
2589  int *dmin, int *next_block, int motion_x, int motion_y)
2590 {
2591  int score;
2592  uint8_t *dest_backup[3];
2593 
2594  copy_context_before_encode(s, backup);
2595 
2596  s->block= s->blocks[*next_block];
2597  s->pb= pb[*next_block];
2598  if(s->data_partitioning){
2599  s->pb2 = pb2 [*next_block];
2600  s->tex_pb= tex_pb[*next_block];
2601  }
2602 
2603  if(*next_block){
2604  memcpy(dest_backup, s->dest, sizeof(s->dest));
2605  s->dest[0] = s->sc.rd_scratchpad;
2606  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2607  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2608  av_assert0(s->linesize >= 32); //FIXME
2609  }
2610 
2611  encode_mb(s, motion_x, motion_y);
2612 
2613  score= put_bits_count(&s->pb);
2614  if(s->data_partitioning){
2615  score+= put_bits_count(&s->pb2);
2616  score+= put_bits_count(&s->tex_pb);
2617  }
2618 
2619  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2620  mpv_reconstruct_mb(s, s->block);
2621 
2622  score *= s->lambda2;
2623  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2624  }
2625 
2626  if(*next_block){
2627  memcpy(s->dest, dest_backup, sizeof(s->dest));
2628  }
2629 
2630  if(score<*dmin){
2631  *dmin= score;
2632  *next_block^=1;
2633 
2635  }
2636 }
2637 
2638 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2639  const uint32_t *sq = ff_square_tab + 256;
2640  int acc=0;
2641  int x,y;
2642 
2643  if(w==16 && h==16)
2644  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2645  else if(w==8 && h==8)
2646  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2647 
2648  for(y=0; y<h; y++){
2649  for(x=0; x<w; x++){
2650  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2651  }
2652  }
2653 
2654  av_assert2(acc>=0);
2655 
2656  return acc;
2657 }
2658 
2659 static int sse_mb(MpegEncContext *s){
2660  int w= 16;
2661  int h= 16;
2662  int chroma_mb_w = w >> s->chroma_x_shift;
2663  int chroma_mb_h = h >> s->chroma_y_shift;
2664 
2665  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2666  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2667 
2668  if(w==16 && h==16)
2669  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2670  return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2671  s->dest[0], s->linesize, 16) +
2672  s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2673  s->dest[1], s->uvlinesize, chroma_mb_h) +
2674  s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2675  s->dest[2], s->uvlinesize, chroma_mb_h);
2676  }else{
2677  return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2678  s->dest[0], s->linesize, 16) +
2679  s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2680  s->dest[1], s->uvlinesize, chroma_mb_h) +
2681  s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2682  s->dest[2], s->uvlinesize, chroma_mb_h);
2683  }
2684  else
2685  return sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2686  s->dest[0], w, h, s->linesize) +
2687  sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2688  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2689  sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2690  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2691 }
2692 
2694  MpegEncContext *s= *(void**)arg;
2695 
2696 
2697  s->me.pre_pass=1;
2698  s->me.dia_size= s->avctx->pre_dia_size;
2699  s->first_slice_line=1;
2700  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2701  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2702  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2703  }
2704  s->first_slice_line=0;
2705  }
2706 
2707  s->me.pre_pass=0;
2708 
2709  return 0;
2710 }
2711 
2713  MpegEncContext *s= *(void**)arg;
2714 
2715  s->me.dia_size= s->avctx->dia_size;
2716  s->first_slice_line=1;
2717  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2718  s->mb_x=0; //for block init below
2720  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2721  s->block_index[0]+=2;
2722  s->block_index[1]+=2;
2723  s->block_index[2]+=2;
2724  s->block_index[3]+=2;
2725 
2726  /* compute motion vector & mb_type and store in context */
2727  if(s->pict_type==AV_PICTURE_TYPE_B)
2728  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2729  else
2730  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2731  }
2732  s->first_slice_line=0;
2733  }
2734  return 0;
2735 }
2736 
2737 static int mb_var_thread(AVCodecContext *c, void *arg){
2738  MpegEncContext *s= *(void**)arg;
2739  int mb_x, mb_y;
2740 
2741  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2742  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2743  int xx = mb_x * 16;
2744  int yy = mb_y * 16;
2745  const uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
2746  int varc;
2747  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2748 
2749  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2750  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2751 
2752  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2753  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2754  s->me.mb_var_sum_temp += varc;
2755  }
2756  }
2757  return 0;
2758 }
2759 
2761  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2762  if(s->partitioned_frame){
2764  }
2765 
2766  ff_mpeg4_stuffing(&s->pb);
2767  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2768  s->out_format == FMT_MJPEG) {
2770  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2772  }
2773 
2774  flush_put_bits(&s->pb);
2775 
2776  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2777  s->misc_bits+= get_bits_diff(s);
2778 }
2779 
2781 {
2782  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2783  int offset = put_bits_count(&s->pb);
2784  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2785  int gobn = s->mb_y / s->gob_index;
2786  int pred_x, pred_y;
2787  if (CONFIG_H263_ENCODER)
2788  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2789  bytestream_put_le32(&ptr, offset);
2790  bytestream_put_byte(&ptr, s->qscale);
2791  bytestream_put_byte(&ptr, gobn);
2792  bytestream_put_le16(&ptr, mba);
2793  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2794  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2795  /* 4MV not implemented */
2796  bytestream_put_byte(&ptr, 0); /* hmv2 */
2797  bytestream_put_byte(&ptr, 0); /* vmv2 */
2798 }
2799 
2800 static void update_mb_info(MpegEncContext *s, int startcode)
2801 {
2802  if (!s->mb_info)
2803  return;
2804  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2805  s->mb_info_size += 12;
2806  s->prev_mb_info = s->last_mb_info;
2807  }
2808  if (startcode) {
2809  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2810  /* This might have incremented mb_info_size above, and we return without
2811  * actually writing any info into that slot yet. But in that case,
2812  * this will be called again at the start of the after writing the
2813  * start code, actually writing the mb info. */
2814  return;
2815  }
2816 
2817  s->last_mb_info = put_bytes_count(&s->pb, 0);
2818  if (!s->mb_info_size)
2819  s->mb_info_size += 12;
2820  write_mb_info(s);
2821 }
2822 
2823 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2824 {
2825  if (put_bytes_left(&s->pb, 0) < threshold
2826  && s->slice_context_count == 1
2827  && s->pb.buf == s->avctx->internal->byte_buffer) {
2828  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2829 
2830  uint8_t *new_buffer = NULL;
2831  int new_buffer_size = 0;
2832 
2833  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2834  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2835  return AVERROR(ENOMEM);
2836  }
2837 
2838  emms_c();
2839 
2840  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2841  s->avctx->internal->byte_buffer_size + size_increase);
2842  if (!new_buffer)
2843  return AVERROR(ENOMEM);
2844 
2845  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2846  av_free(s->avctx->internal->byte_buffer);
2847  s->avctx->internal->byte_buffer = new_buffer;
2848  s->avctx->internal->byte_buffer_size = new_buffer_size;
2849  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2850  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2851  }
2852  if (put_bytes_left(&s->pb, 0) < threshold)
2853  return AVERROR(EINVAL);
2854  return 0;
2855 }
2856 
2857 static int encode_thread(AVCodecContext *c, void *arg){
2858  MpegEncContext *s= *(void**)arg;
2859  int mb_x, mb_y, mb_y_order;
2860  int chr_h= 16>>s->chroma_y_shift;
2861  int i, j;
2862  MpegEncContext best_s = { 0 }, backup_s;
2863  uint8_t bit_buf[2][MAX_MB_BYTES];
2864  uint8_t bit_buf2[2][MAX_MB_BYTES];
2865  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2866  PutBitContext pb[2], pb2[2], tex_pb[2];
2867 
2868  for(i=0; i<2; i++){
2869  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2870  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2871  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2872  }
2873 
2874  s->last_bits= put_bits_count(&s->pb);
2875  s->mv_bits=0;
2876  s->misc_bits=0;
2877  s->i_tex_bits=0;
2878  s->p_tex_bits=0;
2879  s->i_count=0;
2880  s->skip_count=0;
2881 
2882  for(i=0; i<3; i++){
2883  /* init last dc values */
2884  /* note: quant matrix value (8) is implied here */
2885  s->last_dc[i] = 128 << s->intra_dc_precision;
2886 
2887  s->encoding_error[i] = 0;
2888  }
2889  if(s->codec_id==AV_CODEC_ID_AMV){
2890  s->last_dc[0] = 128*8/13;
2891  s->last_dc[1] = 128*8/14;
2892  s->last_dc[2] = 128*8/14;
2893  }
2894  s->mb_skip_run = 0;
2895  memset(s->last_mv, 0, sizeof(s->last_mv));
2896 
2897  s->last_mv_dir = 0;
2898 
2899  switch(s->codec_id){
2900  case AV_CODEC_ID_H263:
2901  case AV_CODEC_ID_H263P:
2902  case AV_CODEC_ID_FLV1:
2903  if (CONFIG_H263_ENCODER)
2904  s->gob_index = H263_GOB_HEIGHT(s->height);
2905  break;
2906  case AV_CODEC_ID_MPEG4:
2907  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2909  break;
2910  }
2911 
2912  s->resync_mb_x=0;
2913  s->resync_mb_y=0;
2914  s->first_slice_line = 1;
2915  s->ptr_lastgob = s->pb.buf;
2916  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2917  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2918  int first_in_slice;
2919  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2920  if (first_in_slice && mb_y_order != s->start_mb_y)
2922  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2923  } else {
2924  mb_y = mb_y_order;
2925  }
2926  s->mb_x=0;
2927  s->mb_y= mb_y;
2928 
2929  ff_set_qscale(s, s->qscale);
2931 
2932  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2933  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2934  int mb_type= s->mb_type[xy];
2935 // int d;
2936  int dmin= INT_MAX;
2937  int dir;
2938  int size_increase = s->avctx->internal->byte_buffer_size/4
2939  + s->mb_width*MAX_MB_BYTES;
2940 
2942  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2943  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2944  return -1;
2945  }
2946  if(s->data_partitioning){
2947  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2948  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2949  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2950  return -1;
2951  }
2952  }
2953 
2954  s->mb_x = mb_x;
2955  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2956  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2957 
2958  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2960  xy= s->mb_y*s->mb_stride + s->mb_x;
2961  mb_type= s->mb_type[xy];
2962  }
2963 
2964  /* write gob / video packet header */
2965  if(s->rtp_mode){
2966  int current_packet_size, is_gob_start;
2967 
2968  current_packet_size = put_bytes_count(&s->pb, 1)
2969  - (s->ptr_lastgob - s->pb.buf);
2970 
2971  is_gob_start = s->rtp_payload_size &&
2972  current_packet_size >= s->rtp_payload_size &&
2973  mb_y + mb_x > 0;
2974 
2975  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2976 
2977  switch(s->codec_id){
2978  case AV_CODEC_ID_H263:
2979  case AV_CODEC_ID_H263P:
2980  if(!s->h263_slice_structured)
2981  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2982  break;
2984  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2986  if(s->mb_skip_run) is_gob_start=0;
2987  break;
2988  case AV_CODEC_ID_MJPEG:
2989  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2990  break;
2991  }
2992 
2993  if(is_gob_start){
2994  if(s->start_mb_y != mb_y || mb_x!=0){
2995  write_slice_end(s);
2996 
2997  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2999  }
3000  }
3001 
3002  av_assert2((put_bits_count(&s->pb)&7) == 0);
3003  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3004 
3005  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3006  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3007  int d = 100 / s->error_rate;
3008  if(r % d == 0){
3009  current_packet_size=0;
3010  s->pb.buf_ptr= s->ptr_lastgob;
3011  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3012  }
3013  }
3014 
3015  switch(s->codec_id){
3016  case AV_CODEC_ID_MPEG4:
3017  if (CONFIG_MPEG4_ENCODER) {
3020  }
3021  break;
3024  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3027  }
3028  break;
3029  case AV_CODEC_ID_H263:
3030  case AV_CODEC_ID_H263P:
3031  if (CONFIG_H263_ENCODER) {
3032  update_mb_info(s, 1);
3034  }
3035  break;
3036  }
3037 
3038  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3039  int bits= put_bits_count(&s->pb);
3040  s->misc_bits+= bits - s->last_bits;
3041  s->last_bits= bits;
3042  }
3043 
3044  s->ptr_lastgob += current_packet_size;
3045  s->first_slice_line=1;
3046  s->resync_mb_x=mb_x;
3047  s->resync_mb_y=mb_y;
3048  }
3049  }
3050 
3051  if( (s->resync_mb_x == s->mb_x)
3052  && s->resync_mb_y+1 == s->mb_y){
3053  s->first_slice_line=0;
3054  }
3055 
3056  s->mb_skipped=0;
3057  s->dquant=0; //only for QP_RD
3058 
3059  update_mb_info(s, 0);
3060 
3061  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3062  int next_block=0;
3063  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3064 
3065  copy_context_before_encode(&backup_s, s);
3066  backup_s.pb= s->pb;
3067  best_s.data_partitioning= s->data_partitioning;
3068  best_s.partitioned_frame= s->partitioned_frame;
3069  if(s->data_partitioning){
3070  backup_s.pb2= s->pb2;
3071  backup_s.tex_pb= s->tex_pb;
3072  }
3073 
3075  s->mv_dir = MV_DIR_FORWARD;
3076  s->mv_type = MV_TYPE_16X16;
3077  s->mb_intra= 0;
3078  s->mv[0][0][0] = s->p_mv_table[xy][0];
3079  s->mv[0][0][1] = s->p_mv_table[xy][1];
3080  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3081  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3082  }
3084  s->mv_dir = MV_DIR_FORWARD;
3085  s->mv_type = MV_TYPE_FIELD;
3086  s->mb_intra= 0;
3087  for(i=0; i<2; i++){
3088  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3089  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3090  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3091  }
3092  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3093  &dmin, &next_block, 0, 0);
3094  }
3096  s->mv_dir = MV_DIR_FORWARD;
3097  s->mv_type = MV_TYPE_16X16;
3098  s->mb_intra= 0;
3099  s->mv[0][0][0] = 0;
3100  s->mv[0][0][1] = 0;
3101  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3102  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3103  }
3105  s->mv_dir = MV_DIR_FORWARD;
3106  s->mv_type = MV_TYPE_8X8;
3107  s->mb_intra= 0;
3108  for(i=0; i<4; i++){
3109  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3110  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3111  }
3112  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3113  &dmin, &next_block, 0, 0);
3114  }
3116  s->mv_dir = MV_DIR_FORWARD;
3117  s->mv_type = MV_TYPE_16X16;
3118  s->mb_intra= 0;
3119  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3120  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3121  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3122  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3123  }
3125  s->mv_dir = MV_DIR_BACKWARD;
3126  s->mv_type = MV_TYPE_16X16;
3127  s->mb_intra= 0;
3128  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3129  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3130  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3131  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3132  }
3134  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3135  s->mv_type = MV_TYPE_16X16;
3136  s->mb_intra= 0;
3137  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3138  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3139  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3140  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3141  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3142  &dmin, &next_block, 0, 0);
3143  }
3145  s->mv_dir = MV_DIR_FORWARD;
3146  s->mv_type = MV_TYPE_FIELD;
3147  s->mb_intra= 0;
3148  for(i=0; i<2; i++){
3149  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3150  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3151  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3152  }
3153  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3154  &dmin, &next_block, 0, 0);
3155  }
3157  s->mv_dir = MV_DIR_BACKWARD;
3158  s->mv_type = MV_TYPE_FIELD;
3159  s->mb_intra= 0;
3160  for(i=0; i<2; i++){
3161  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3162  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3163  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3164  }
3165  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3166  &dmin, &next_block, 0, 0);
3167  }
3169  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3170  s->mv_type = MV_TYPE_FIELD;
3171  s->mb_intra= 0;
3172  for(dir=0; dir<2; dir++){
3173  for(i=0; i<2; i++){
3174  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3175  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3176  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3177  }
3178  }
3179  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3180  &dmin, &next_block, 0, 0);
3181  }
3183  s->mv_dir = 0;
3184  s->mv_type = MV_TYPE_16X16;
3185  s->mb_intra= 1;
3186  s->mv[0][0][0] = 0;
3187  s->mv[0][0][1] = 0;
3188  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3189  &dmin, &next_block, 0, 0);
3190  if(s->h263_pred || s->h263_aic){
3191  if(best_s.mb_intra)
3192  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3193  else
3194  ff_clean_intra_table_entries(s); //old mode?
3195  }
3196  }
3197 
3198  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3199  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3200  const int last_qp= backup_s.qscale;
3201  int qpi, qp, dc[6];
3202  int16_t ac[6][16];
3203  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3204  static const int dquant_tab[4]={-1,1,-2,2};
3205  int storecoefs = s->mb_intra && s->dc_val[0];
3206 
3207  av_assert2(backup_s.dquant == 0);
3208 
3209  //FIXME intra
3210  s->mv_dir= best_s.mv_dir;
3211  s->mv_type = MV_TYPE_16X16;
3212  s->mb_intra= best_s.mb_intra;
3213  s->mv[0][0][0] = best_s.mv[0][0][0];
3214  s->mv[0][0][1] = best_s.mv[0][0][1];
3215  s->mv[1][0][0] = best_s.mv[1][0][0];
3216  s->mv[1][0][1] = best_s.mv[1][0][1];
3217 
3218  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3219  for(; qpi<4; qpi++){
3220  int dquant= dquant_tab[qpi];
3221  qp= last_qp + dquant;
3222  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3223  continue;
3224  backup_s.dquant= dquant;
3225  if(storecoefs){
3226  for(i=0; i<6; i++){
3227  dc[i]= s->dc_val[0][ s->block_index[i] ];
3228  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3229  }
3230  }
3231 
3232  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3233  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3234  if(best_s.qscale != qp){
3235  if(storecoefs){
3236  for(i=0; i<6; i++){
3237  s->dc_val[0][ s->block_index[i] ]= dc[i];
3238  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3239  }
3240  }
3241  }
3242  }
3243  }
3244  }
3245  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3246  int mx= s->b_direct_mv_table[xy][0];
3247  int my= s->b_direct_mv_table[xy][1];
3248 
3249  backup_s.dquant = 0;
3250  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3251  s->mb_intra= 0;
3252  ff_mpeg4_set_direct_mv(s, mx, my);
3253  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3254  &dmin, &next_block, mx, my);
3255  }
3256  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3257  backup_s.dquant = 0;
3258  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3259  s->mb_intra= 0;
3260  ff_mpeg4_set_direct_mv(s, 0, 0);
3261  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3262  &dmin, &next_block, 0, 0);
3263  }
3264  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3265  int coded=0;
3266  for(i=0; i<6; i++)
3267  coded |= s->block_last_index[i];
3268  if(coded){
3269  int mx,my;
3270  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3271  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3272  mx=my=0; //FIXME find the one we actually used
3273  ff_mpeg4_set_direct_mv(s, mx, my);
3274  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3275  mx= s->mv[1][0][0];
3276  my= s->mv[1][0][1];
3277  }else{
3278  mx= s->mv[0][0][0];
3279  my= s->mv[0][0][1];
3280  }
3281 
3282  s->mv_dir= best_s.mv_dir;
3283  s->mv_type = best_s.mv_type;
3284  s->mb_intra= 0;
3285 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3286  s->mv[0][0][1] = best_s.mv[0][0][1];
3287  s->mv[1][0][0] = best_s.mv[1][0][0];
3288  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3289  backup_s.dquant= 0;
3290  s->skipdct=1;
3291  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3292  &dmin, &next_block, mx, my);
3293  s->skipdct=0;
3294  }
3295  }
3296 
3297  s->current_picture.qscale_table[xy] = best_s.qscale;
3298 
3299  copy_context_after_encode(s, &best_s);
3300 
3301  pb_bits_count= put_bits_count(&s->pb);
3302  flush_put_bits(&s->pb);
3303  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3304  s->pb= backup_s.pb;
3305 
3306  if(s->data_partitioning){
3307  pb2_bits_count= put_bits_count(&s->pb2);
3308  flush_put_bits(&s->pb2);
3309  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3310  s->pb2= backup_s.pb2;
3311 
3312  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3313  flush_put_bits(&s->tex_pb);
3314  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3315  s->tex_pb= backup_s.tex_pb;
3316  }
3317  s->last_bits= put_bits_count(&s->pb);
3318 
3319  if (CONFIG_H263_ENCODER &&
3320  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3322 
3323  if(next_block==0){ //FIXME 16 vs linesize16
3324  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3325  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3326  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3327  }
3328 
3329  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3330  mpv_reconstruct_mb(s, s->block);
3331  } else {
3332  int motion_x = 0, motion_y = 0;
3333  s->mv_type=MV_TYPE_16X16;
3334  // only one MB-Type possible
3335 
3336  switch(mb_type){
3338  s->mv_dir = 0;
3339  s->mb_intra= 1;
3340  motion_x= s->mv[0][0][0] = 0;
3341  motion_y= s->mv[0][0][1] = 0;
3342  break;
3344  s->mv_dir = MV_DIR_FORWARD;
3345  s->mb_intra= 0;
3346  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3347  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3348  break;
3350  s->mv_dir = MV_DIR_FORWARD;
3351  s->mv_type = MV_TYPE_FIELD;
3352  s->mb_intra= 0;
3353  for(i=0; i<2; i++){
3354  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3355  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3356  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3357  }
3358  break;
3360  s->mv_dir = MV_DIR_FORWARD;
3361  s->mv_type = MV_TYPE_8X8;
3362  s->mb_intra= 0;
3363  for(i=0; i<4; i++){
3364  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3365  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3366  }
3367  break;
3369  if (CONFIG_MPEG4_ENCODER) {
3371  s->mb_intra= 0;
3372  motion_x=s->b_direct_mv_table[xy][0];
3373  motion_y=s->b_direct_mv_table[xy][1];
3374  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3375  }
3376  break;
3378  if (CONFIG_MPEG4_ENCODER) {
3380  s->mb_intra= 0;
3381  ff_mpeg4_set_direct_mv(s, 0, 0);
3382  }
3383  break;
3385  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3386  s->mb_intra= 0;
3387  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3388  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3389  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3390  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3391  break;
3393  s->mv_dir = MV_DIR_BACKWARD;
3394  s->mb_intra= 0;
3395  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3396  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3397  break;
3399  s->mv_dir = MV_DIR_FORWARD;
3400  s->mb_intra= 0;
3401  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3402  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3403  break;
3405  s->mv_dir = MV_DIR_FORWARD;
3406  s->mv_type = MV_TYPE_FIELD;
3407  s->mb_intra= 0;
3408  for(i=0; i<2; i++){
3409  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3410  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3411  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3412  }
3413  break;
3415  s->mv_dir = MV_DIR_BACKWARD;
3416  s->mv_type = MV_TYPE_FIELD;
3417  s->mb_intra= 0;
3418  for(i=0; i<2; i++){
3419  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3420  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3421  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3422  }
3423  break;
3425  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3426  s->mv_type = MV_TYPE_FIELD;
3427  s->mb_intra= 0;
3428  for(dir=0; dir<2; dir++){
3429  for(i=0; i<2; i++){
3430  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3431  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3432  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3433  }
3434  }
3435  break;
3436  default:
3437  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3438  }
3439 
3440  encode_mb(s, motion_x, motion_y);
3441 
3442  // RAL: Update last macroblock type
3443  s->last_mv_dir = s->mv_dir;
3444 
3445  if (CONFIG_H263_ENCODER &&
3446  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3448 
3449  mpv_reconstruct_mb(s, s->block);
3450  }
3451 
3452  /* clean the MV table in IPS frames for direct mode in B-frames */
3453  if(s->mb_intra /* && I,P,S_TYPE */){
3454  s->p_mv_table[xy][0]=0;
3455  s->p_mv_table[xy][1]=0;
3456  }
3457 
3458  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3459  int w= 16;
3460  int h= 16;
3461 
3462  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3463  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3464 
3465  s->encoding_error[0] += sse(
3466  s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3467  s->dest[0], w, h, s->linesize);
3468  s->encoding_error[1] += sse(
3469  s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3470  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3471  s->encoding_error[2] += sse(
3472  s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3473  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3474  }
3475  if(s->loop_filter){
3476  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3478  }
3479  ff_dlog(s->avctx, "MB %d %d bits\n",
3480  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3481  }
3482  }
3483 
3484  //not beautiful here but we must write it before flushing so it has to be here
3485  if (CONFIG_MSMPEG4ENC && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3487 
3488  write_slice_end(s);
3489 
3490  return 0;
3491 }
3492 
3493 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the opening line of merge_context_after_me(dst, src)
 * (doc line 3494) was dropped by the documentation extraction; the
 * statements below are its body.  They fold the per-slice-thread
 * motion-estimation accumulators of src into dst and zero src
 * (see the MERGE macro above) — confirm against upstream. */
3495  MERGE(me.scene_change_score);
3496  MERGE(me.mc_mb_var_sum_temp);
3497  MERGE(me.mb_var_sum_temp);
3498 }
3499 
/* NOTE(review): the opening line of merge_context_after_encode(dst, src)
 * (doc line 3500) was dropped by the documentation extraction; the code
 * below is its body.  It folds the per-slice-thread bit-accounting and
 * error statistics of src into dst (MERGE zeroes src) and then appends
 * src's byte-aligned bitstream onto dst's. */
3501  int i;
3502 
3503  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3504  MERGE(dct_count[1]);
3505  MERGE(mv_bits);
3506  MERGE(i_tex_bits);
3507  MERGE(p_tex_bits);
3508  MERGE(i_count);
3509  MERGE(skip_count);
3510  MERGE(misc_bits);
3511  MERGE(encoding_error[0]);
3512  MERGE(encoding_error[1]);
3513  MERGE(encoding_error[2]);
3514 
/* per-coefficient denoising statistics exist only when noise reduction is on */
3515  if (dst->noise_reduction){
3516  for(i=0; i<64; i++){
3517  MERGE(dct_error_sum[0][i]);
3518  MERGE(dct_error_sum[1][i]);
3519  }
3520  }
3521 
/* both bitstreams must be byte aligned before the raw bit copy below */
3522  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3523  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3524  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3525  flush_put_bits(&dst->pb);
3526 }
3527 
/**
 * Choose the quantiser for the current picture: either the queued
 * s->next_lambda, or (when not fixed-qscale) the rate-control estimate
 * from ff_rate_estimate_qscale().  With adaptive quantisation a
 * codec-specific helper prepares the per-MB lambda table and
 * lambda_table[0] becomes the base lambda.
 *
 * @param s       encoder context
 * @param dry_run nonzero for an estimation-only pass (queued lambda is
 *                then not consumed)
 * @return 0 on success, -1 if rate control produced a negative quality
 */
3528 static int estimate_qp(MpegEncContext *s, int dry_run){
3529  if (s->next_lambda){
3530  s->current_picture_ptr->f->quality =
3531  s->current_picture.f->quality = s->next_lambda;
/* consume the queued lambda only on the real (non-dry) pass */
3532  if(!dry_run) s->next_lambda= 0;
3533  } else if (!s->fixed_qscale) {
3534  int quality = ff_rate_estimate_qscale(s, dry_run);
3535  s->current_picture_ptr->f->quality =
3536  s->current_picture.f->quality = quality;
3537  if (s->current_picture.f->quality < 0)
3538  return -1;
3539  }
3540 
3541  if(s->adaptive_quant){
3542  switch(s->codec_id){
3543  case AV_CODEC_ID_MPEG4:
3544  if (CONFIG_MPEG4_ENCODER)
/* NOTE(review): the statement on doc line 3545 (presumably the MPEG-4
 * qscale-cleaning helper call) was dropped by the extraction — restore
 * from upstream before relying on this text. */
3546  break;
3547  case AV_CODEC_ID_H263:
3548  case AV_CODEC_ID_H263P:
3549  case AV_CODEC_ID_FLV1:
3550  if (CONFIG_H263_ENCODER)
/* NOTE(review): doc line 3551 (presumably the H.263 qscale-cleaning
 * helper call) was dropped by the extraction. */
3552  break;
3553  default:
/* NOTE(review): doc line 3554 (the default-case statement) was dropped
 * by the extraction. */
3555  }
3556 
3557  s->lambda= s->lambda_table[0];
3558  //FIXME broken
3559  }else
3560  s->lambda = s->current_picture.f->quality;
3561  update_qscale(s);
3562  return 0;
3563 }
3564 
3565 /* must be called before writing the header */
/* NOTE(review): the signature line (doc 3566, presumably
 * set_frame_distances(MpegEncContext *s)) was dropped by the extraction.
 * The body below derives s->time from the current picture's pts and
 * maintains the temporal distances pp_time (last-to-current non-B
 * distance) and pb_time (used when coding B-frames). */
3567  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3568  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3569 
3570  if(s->pict_type==AV_PICTURE_TYPE_B){
3571  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3572  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3573  }else{
3574  s->pp_time= s->time - s->last_non_b_time;
3575  s->last_non_b_time= s->time;
3576  av_assert1(s->picture_number==0 || s->pp_time > 0);
3577  }
3578 }
3579 
3581 {
3582  int i, ret;
3583  int bits;
3584  int context_count = s->slice_context_count;
3585 
3586  /* Reset the average MB variance */
3587  s->me.mb_var_sum_temp =
3588  s->me.mc_mb_var_sum_temp = 0;
3589 
3590  /* we need to initialize some time vars before we can encode B-frames */
3591  // RAL: Condition added for MPEG1VIDEO
3592  if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
3594  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3596 
3597  s->me.scene_change_score=0;
3598 
3599 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3600 
3601  if(s->pict_type==AV_PICTURE_TYPE_I){
3602  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3603  else s->no_rounding=0;
3604  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3605  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3606  s->no_rounding ^= 1;
3607  }
3608 
3609  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3610  if (estimate_qp(s,1) < 0)
3611  return -1;
3613  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3614  if(s->pict_type==AV_PICTURE_TYPE_B)
3615  s->lambda= s->last_lambda_for[s->pict_type];
3616  else
3617  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3618  update_qscale(s);
3619  }
3620 
3621  if (s->out_format != FMT_MJPEG) {
3622  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3623  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3624  s->q_chroma_intra_matrix = s->q_intra_matrix;
3625  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3626  }
3627 
3628  s->mb_intra=0; //for the rate distortion & bit compare functions
3629  for(i=1; i<context_count; i++){
3630  ret = ff_update_duplicate_context(s->thread_context[i], s);
3631  if (ret < 0)
3632  return ret;
3633  }
3634 
3635  if(ff_init_me(s)<0)
3636  return -1;
3637 
3638  /* Estimate motion for every MB */
3639  if(s->pict_type != AV_PICTURE_TYPE_I){
3640  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3641  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3642  if (s->pict_type != AV_PICTURE_TYPE_B) {
3643  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3644  s->me_pre == 2) {
3645  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3646  }
3647  }
3648 
3649  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3650  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3651  /* I-Frame */
3652  for(i=0; i<s->mb_stride*s->mb_height; i++)
3653  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3654 
3655  if(!s->fixed_qscale){
3656  /* finding spatial complexity for I-frame rate control */
3657  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3658  }
3659  }
3660  for(i=1; i<context_count; i++){
3661  merge_context_after_me(s, s->thread_context[i]);
3662  }
3663  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3664  s->mb_var_sum = s->me. mb_var_sum_temp;
3665  emms_c();
3666 
3667  if (s->me.scene_change_score > s->scenechange_threshold &&
3668  s->pict_type == AV_PICTURE_TYPE_P) {
3669  s->pict_type= AV_PICTURE_TYPE_I;
3670  for(i=0; i<s->mb_stride*s->mb_height; i++)
3671  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3672  if(s->msmpeg4_version >= 3)
3673  s->no_rounding=1;
3674  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3675  s->mb_var_sum, s->mc_mb_var_sum);
3676  }
3677 
3678  if(!s->umvplus){
3679  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3680  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3681 
3682  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3683  int a,b;
3684  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3685  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3686  s->f_code= FFMAX3(s->f_code, a, b);
3687  }
3688 
3690  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3691  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3692  int j;
3693  for(i=0; i<2; i++){
3694  for(j=0; j<2; j++)
3695  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3696  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3697  }
3698  }
3699  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3700  int a, b;
3701 
3702  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3703  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3704  s->f_code = FFMAX(a, b);
3705 
3706  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3707  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3708  s->b_code = FFMAX(a, b);
3709 
3710  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3711  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3712  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3713  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3714  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3715  int dir, j;
3716  for(dir=0; dir<2; dir++){
3717  for(i=0; i<2; i++){
3718  for(j=0; j<2; j++){
3721  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3722  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3723  }
3724  }
3725  }
3726  }
3727  }
3728  }
3729 
3730  if (estimate_qp(s, 0) < 0)
3731  return -1;
3732 
3733  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3734  s->pict_type == AV_PICTURE_TYPE_I &&
3735  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3736  s->qscale= 3; //reduce clipping problems
3737 
3738  if (s->out_format == FMT_MJPEG) {
3739  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3740  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3741 
3742  if (s->avctx->intra_matrix) {
3743  chroma_matrix =
3744  luma_matrix = s->avctx->intra_matrix;
3745  }
3746  if (s->avctx->chroma_intra_matrix)
3747  chroma_matrix = s->avctx->chroma_intra_matrix;
3748 
3749  /* for mjpeg, we do include qscale in the matrix */
3750  for(i=1;i<64;i++){
3751  int j = s->idsp.idct_permutation[i];
3752 
3753  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3754  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3755  }
3756  s->y_dc_scale_table=
3757  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3758  s->chroma_intra_matrix[0] =
3759  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3760  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3761  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3762  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3763  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3764  s->qscale= 8;
3765 
3766  if (s->codec_id == AV_CODEC_ID_AMV) {
3767  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3768  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3769  for (int i = 1; i < 64; i++) {
3770  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3771 
3772  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3773  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3774  }
3775  s->y_dc_scale_table = y;
3776  s->c_dc_scale_table = c;
3777  s->intra_matrix[0] = 13;
3778  s->chroma_intra_matrix[0] = 14;
3779  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3780  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3781  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3782  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3783  s->qscale = 8;
3784  }
3785  }
3786 
3787  //FIXME var duplication
3788  if (s->pict_type == AV_PICTURE_TYPE_I) {
3789  s->current_picture_ptr->f->flags |= AV_FRAME_FLAG_KEY; //FIXME pic_ptr
3790  s->current_picture.f->flags |= AV_FRAME_FLAG_KEY;
3791  } else {
3792  s->current_picture_ptr->f->flags &= ~AV_FRAME_FLAG_KEY; //FIXME pic_ptr
3793  s->current_picture.f->flags &= ~AV_FRAME_FLAG_KEY;
3794  }
3795  s->current_picture_ptr->f->pict_type =
3796  s->current_picture.f->pict_type = s->pict_type;
3797 
3798  if (s->current_picture.f->flags & AV_FRAME_FLAG_KEY)
3799  s->picture_in_gop_number=0;
3800 
3801  s->mb_x = s->mb_y = 0;
3802  s->last_bits= put_bits_count(&s->pb);
3803  switch(s->out_format) {
3804 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3805  case FMT_MJPEG:
3807  break;
3808 #endif
3809  case FMT_SPEEDHQ:
3810  if (CONFIG_SPEEDHQ_ENCODER)
3812  break;
3813  case FMT_H261:
3814  if (CONFIG_H261_ENCODER)
3816  break;
3817  case FMT_H263:
3818  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3820  else if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
3822  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3824  if (ret < 0)
3825  return ret;
3826  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3828  if (ret < 0)
3829  return ret;
3830  }
3831  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3833  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3835  else if (CONFIG_H263_ENCODER)
3837  break;
3838  case FMT_MPEG1:
3839  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3841  break;
3842  default:
3843  av_assert0(0);
3844  }
3845  bits= put_bits_count(&s->pb);
3846  s->header_bits= bits - s->last_bits;
3847 
3848  for(i=1; i<context_count; i++){
3849  update_duplicate_context_after_me(s->thread_context[i], s);
3850  }
3851  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3852  for(i=1; i<context_count; i++){
3853  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3854  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3855  merge_context_after_encode(s, s->thread_context[i]);
3856  }
3857  emms_c();
3858  return 0;
3859 }
3860 
3861 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3862  const int intra= s->mb_intra;
3863  int i;
3864 
3865  s->dct_count[intra]++;
3866 
3867  for(i=0; i<64; i++){
3868  int level= block[i];
3869 
3870  if(level){
3871  if(level>0){
3872  s->dct_error_sum[intra][i] += level;
3873  level -= s->dct_offset[intra][i];
3874  if(level<0) level=0;
3875  }else{
3876  s->dct_error_sum[intra][i] -= level;
3877  level += s->dct_offset[intra][i];
3878  if(level>0) level=0;
3879  }
3880  block[i]= level;
3881  }
3882  }
3883 }
3884 
3886  int16_t *block, int n,
3887  int qscale, int *overflow){
3888  const int *qmat;
3889  const uint16_t *matrix;
3890  const uint8_t *scantable;
3891  const uint8_t *perm_scantable;
3892  int max=0;
3893  unsigned int threshold1, threshold2;
3894  int bias=0;
3895  int run_tab[65];
3896  int level_tab[65];
3897  int score_tab[65];
3898  int survivor[65];
3899  int survivor_count;
3900  int last_run=0;
3901  int last_level=0;
3902  int last_score= 0;
3903  int last_i;
3904  int coeff[2][64];
3905  int coeff_count[64];
3906  int qmul, qadd, start_i, last_non_zero, i, dc;
3907  const int esc_length= s->ac_esc_length;
3908  uint8_t * length;
3909  uint8_t * last_length;
3910  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3911  int mpeg2_qscale;
3912 
3913  s->fdsp.fdct(block);
3914 
3915  if(s->dct_error_sum)
3916  s->denoise_dct(s, block);
3917  qmul= qscale*16;
3918  qadd= ((qscale-1)|1)*8;
3919 
3920  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3921  else mpeg2_qscale = qscale << 1;
3922 
3923  if (s->mb_intra) {
3924  int q;
3925  scantable= s->intra_scantable.scantable;
3926  perm_scantable= s->intra_scantable.permutated;
3927  if (!s->h263_aic) {
3928  if (n < 4)
3929  q = s->y_dc_scale;
3930  else
3931  q = s->c_dc_scale;
3932  q = q << 3;
3933  } else{
3934  /* For AIC we skip quant/dequant of INTRADC */
3935  q = 1 << 3;
3936  qadd=0;
3937  }
3938 
3939  /* note: block[0] is assumed to be positive */
3940  block[0] = (block[0] + (q >> 1)) / q;
3941  start_i = 1;
3942  last_non_zero = 0;
3943  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3944  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3945  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3946  bias= 1<<(QMAT_SHIFT-1);
3947 
3948  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3949  length = s->intra_chroma_ac_vlc_length;
3950  last_length= s->intra_chroma_ac_vlc_last_length;
3951  } else {
3952  length = s->intra_ac_vlc_length;
3953  last_length= s->intra_ac_vlc_last_length;
3954  }
3955  } else {
3956  scantable= s->inter_scantable.scantable;
3957  perm_scantable= s->inter_scantable.permutated;
3958  start_i = 0;
3959  last_non_zero = -1;
3960  qmat = s->q_inter_matrix[qscale];
3961  matrix = s->inter_matrix;
3962  length = s->inter_ac_vlc_length;
3963  last_length= s->inter_ac_vlc_last_length;
3964  }
3965  last_i= start_i;
3966 
3967  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3968  threshold2= (threshold1<<1);
3969 
3970  for(i=63; i>=start_i; i--) {
3971  const int j = scantable[i];
3972  int level = block[j] * qmat[j];
3973 
3974  if(((unsigned)(level+threshold1))>threshold2){
3975  last_non_zero = i;
3976  break;
3977  }
3978  }
3979 
3980  for(i=start_i; i<=last_non_zero; i++) {
3981  const int j = scantable[i];
3982  int level = block[j] * qmat[j];
3983 
3984 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3985 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3986  if(((unsigned)(level+threshold1))>threshold2){
3987  if(level>0){
3988  level= (bias + level)>>QMAT_SHIFT;
3989  coeff[0][i]= level;
3990  coeff[1][i]= level-1;
3991 // coeff[2][k]= level-2;
3992  }else{
3993  level= (bias - level)>>QMAT_SHIFT;
3994  coeff[0][i]= -level;
3995  coeff[1][i]= -level+1;
3996 // coeff[2][k]= -level+2;
3997  }
3998  coeff_count[i]= FFMIN(level, 2);
3999  av_assert2(coeff_count[i]);
4000  max |=level;
4001  }else{
4002  coeff[0][i]= (level>>31)|1;
4003  coeff_count[i]= 1;
4004  }
4005  }
4006 
4007  *overflow= s->max_qcoeff < max; //overflow might have happened
4008 
4009  if(last_non_zero < start_i){
4010  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4011  return last_non_zero;
4012  }
4013 
4014  score_tab[start_i]= 0;
4015  survivor[0]= start_i;
4016  survivor_count= 1;
4017 
4018  for(i=start_i; i<=last_non_zero; i++){
4019  int level_index, j, zero_distortion;
4020  int dct_coeff= FFABS(block[ scantable[i] ]);
4021  int best_score=256*256*256*120;
4022 
4023  if (s->fdsp.fdct == ff_fdct_ifast)
4024  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4025  zero_distortion= dct_coeff*dct_coeff;
4026 
4027  for(level_index=0; level_index < coeff_count[i]; level_index++){
4028  int distortion;
4029  int level= coeff[level_index][i];
4030  const int alevel= FFABS(level);
4031  int unquant_coeff;
4032 
4033  av_assert2(level);
4034 
4035  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4036  unquant_coeff= alevel*qmul + qadd;
4037  } else if(s->out_format == FMT_MJPEG) {
4038  j = s->idsp.idct_permutation[scantable[i]];
4039  unquant_coeff = alevel * matrix[j] * 8;
4040  }else{ // MPEG-1
4041  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4042  if(s->mb_intra){
4043  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4044  unquant_coeff = (unquant_coeff - 1) | 1;
4045  }else{
4046  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4047  unquant_coeff = (unquant_coeff - 1) | 1;
4048  }
4049  unquant_coeff<<= 3;
4050  }
4051 
4052  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4053  level+=64;
4054  if((level&(~127)) == 0){
4055  for(j=survivor_count-1; j>=0; j--){
4056  int run= i - survivor[j];
4057  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4058  score += score_tab[i-run];
4059 
4060  if(score < best_score){
4061  best_score= score;
4062  run_tab[i+1]= run;
4063  level_tab[i+1]= level-64;
4064  }
4065  }
4066 
4067  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4068  for(j=survivor_count-1; j>=0; j--){
4069  int run= i - survivor[j];
4070  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4071  score += score_tab[i-run];
4072  if(score < last_score){
4073  last_score= score;
4074  last_run= run;
4075  last_level= level-64;
4076  last_i= i+1;
4077  }
4078  }
4079  }
4080  }else{
4081  distortion += esc_length*lambda;
4082  for(j=survivor_count-1; j>=0; j--){
4083  int run= i - survivor[j];
4084  int score= distortion + score_tab[i-run];
4085 
4086  if(score < best_score){
4087  best_score= score;
4088  run_tab[i+1]= run;
4089  level_tab[i+1]= level-64;
4090  }
4091  }
4092 
4093  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4094  for(j=survivor_count-1; j>=0; j--){
4095  int run= i - survivor[j];
4096  int score= distortion + score_tab[i-run];
4097  if(score < last_score){
4098  last_score= score;
4099  last_run= run;
4100  last_level= level-64;
4101  last_i= i+1;
4102  }
4103  }
4104  }
4105  }
4106  }
4107 
4108  score_tab[i+1]= best_score;
4109 
4110  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4111  if(last_non_zero <= 27){
4112  for(; survivor_count; survivor_count--){
4113  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4114  break;
4115  }
4116  }else{
4117  for(; survivor_count; survivor_count--){
4118  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4119  break;
4120  }
4121  }
4122 
4123  survivor[ survivor_count++ ]= i+1;
4124  }
4125 
4126  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4127  last_score= 256*256*256*120;
4128  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4129  int score= score_tab[i];
4130  if (i)
4131  score += lambda * 2; // FIXME more exact?
4132 
4133  if(score < last_score){
4134  last_score= score;
4135  last_i= i;
4136  last_level= level_tab[i];
4137  last_run= run_tab[i];
4138  }
4139  }
4140  }
4141 
4142  s->coded_score[n] = last_score;
4143 
4144  dc= FFABS(block[0]);
4145  last_non_zero= last_i - 1;
4146  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4147 
4148  if(last_non_zero < start_i)
4149  return last_non_zero;
4150 
4151  if(last_non_zero == 0 && start_i == 0){
4152  int best_level= 0;
4153  int best_score= dc * dc;
4154 
4155  for(i=0; i<coeff_count[0]; i++){
4156  int level= coeff[i][0];
4157  int alevel= FFABS(level);
4158  int unquant_coeff, score, distortion;
4159 
4160  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4161  unquant_coeff= (alevel*qmul + qadd)>>3;
4162  } else{ // MPEG-1
4163  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4164  unquant_coeff = (unquant_coeff - 1) | 1;
4165  }
4166  unquant_coeff = (unquant_coeff + 4) >> 3;
4167  unquant_coeff<<= 3 + 3;
4168 
4169  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4170  level+=64;
4171  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4172  else score= distortion + esc_length*lambda;
4173 
4174  if(score < best_score){
4175  best_score= score;
4176  best_level= level - 64;
4177  }
4178  }
4179  block[0]= best_level;
4180  s->coded_score[n] = best_score - dc*dc;
4181  if(best_level == 0) return -1;
4182  else return last_non_zero;
4183  }
4184 
4185  i= last_i;
4186  av_assert2(last_level);
4187 
4188  block[ perm_scantable[last_non_zero] ]= last_level;
4189  i -= last_run + 1;
4190 
4191  for(; i>start_i; i -= run_tab[i] + 1){
4192  block[ perm_scantable[i-1] ]= level_tab[i];
4193  }
4194 
4195  return last_non_zero;
4196 }
4197 
4198 static int16_t basis[64][64];
4199 
4200 static void build_basis(uint8_t *perm){
4201  int i, j, x, y;
4202  emms_c();
4203  for(i=0; i<8; i++){
4204  for(j=0; j<8; j++){
4205  for(y=0; y<8; y++){
4206  for(x=0; x<8; x++){
4207  double s= 0.25*(1<<BASIS_SHIFT);
4208  int index= 8*i + j;
4209  int perm_index= perm[index];
4210  if(i==0) s*= sqrt(0.5);
4211  if(j==0) s*= sqrt(0.5);
4212  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4213  }
4214  }
4215  }
4216  }
4217 }
4218 
/**
 * Noise-shaping refinement of one already-quantized 8x8 block: repeatedly
 * try changing single coefficients by +-1 and keep each change that lowers
 * the combined score of weighted spatial-domain error plus VLC bit cost
 * scaled by lambda; stop when no change improves the score.
 *
 * @param s      encoder context (VLC length tables, DSP, noise-shaping level)
 * @param block  quantized coefficients, modified in place
 * @param weight per-coefficient weights; rewritten here into the range 16..63
 * @param orig   reference spatial-domain samples for this block
 * @param n      block index (n < 4 luma, otherwise chroma)
 * @param qscale quantizer scale
 * @return index of the last non-zero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                     // spatial-domain reconstruction error, scaled by RECON_SHIFT
    LOCAL_ALIGNED_16(int16_t, d1, [64]); // DCT of the weighted error, used for gradient analysis
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                     // zero-run lengths preceding each non-zero coefficient
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;                    // VLC bit lengths for non-last coefficients
    uint8_t * last_length;               // VLC bit lengths for the last coefficient
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* lazily build the (permuted) DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                     // intra: DC handled separately, AC starts at 1
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* initial error: (rounded) DC reconstruction minus the original samples */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* remap weights into 16..63 and accumulate sum of squares for lambda */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    /* scale lambda2 by the mean squared weight (four 6-bit shifts compensate
       the weight and basis scaling) */
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* add the dequantized coefficients' basis contributions to the error and
       record the RLE run lengths of the current block */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* greedy refinement loop: per pass, find the single +-1 coefficient change
       with the best score; apply it and repeat until nothing improves */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted error; its sign tells whether introducing a
               +-1 coefficient can reduce the error at that frequency */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* intra only: also try +-1 on the DC coefficient */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* unless aggressive noise shaping, only look one past the last
               non-zero coefficient */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;                 // run2 counts down to the next non-zero coefficient
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* at low noise-shaping levels never increase a magnitude */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* non-zero -> non-zero: bit-cost delta of re-coding the
                           same (run, level) pair with the new level */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: splits an existing run in two */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            /* skip if the error gradient points the same way as
                               the new level (the change cannot help) */
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;   // out of table range: treat as escape

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* new coefficient becomes the new "last": the previous
                               last one switches from last_length to length coding */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: merges two runs into one */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        /* removing the last coefficient: the previous one becomes last */
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* add the distortion delta of applying this coefficient change */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                /* the change may have zeroed the last coefficient: rescan backwards */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            /* rebuild the run-length table to match the updated block */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            /* fold the accepted change into the running error */
            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;   // no improving change found: done
        }
    }

    return last_non_zero;
}
4534 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Stash the covered coefficients and clear their original slots,
     * so that source and destination positions may safely overlap. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Scatter the stashed coefficients to their permuted positions. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
4570 
4572  int16_t *block, int n,
4573  int qscale, int *overflow)
4574 {
4575  int i, j, level, last_non_zero, q, start_i;
4576  const int *qmat;
4577  const uint8_t *scantable;
4578  int bias;
4579  int max=0;
4580  unsigned int threshold1, threshold2;
4581 
4582  s->fdsp.fdct(block);
4583 
4584  if(s->dct_error_sum)
4585  s->denoise_dct(s, block);
4586 
4587  if (s->mb_intra) {
4588  scantable= s->intra_scantable.scantable;
4589  if (!s->h263_aic) {
4590  if (n < 4)
4591  q = s->y_dc_scale;
4592  else
4593  q = s->c_dc_scale;
4594  q = q << 3;
4595  } else
4596  /* For AIC we skip quant/dequant of INTRADC */
4597  q = 1 << 3;
4598 
4599  /* note: block[0] is assumed to be positive */
4600  block[0] = (block[0] + (q >> 1)) / q;
4601  start_i = 1;
4602  last_non_zero = 0;
4603  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4604  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4605  } else {
4606  scantable= s->inter_scantable.scantable;
4607  start_i = 0;
4608  last_non_zero = -1;
4609  qmat = s->q_inter_matrix[qscale];
4610  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4611  }
4612  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4613  threshold2= (threshold1<<1);
4614  for(i=63;i>=start_i;i--) {
4615  j = scantable[i];
4616  level = block[j] * qmat[j];
4617 
4618  if(((unsigned)(level+threshold1))>threshold2){
4619  last_non_zero = i;
4620  break;
4621  }else{
4622  block[j]=0;
4623  }
4624  }
4625  for(i=start_i; i<=last_non_zero; i++) {
4626  j = scantable[i];
4627  level = block[j] * qmat[j];
4628 
4629 // if( bias+level >= (1<<QMAT_SHIFT)
4630 // || bias-level >= (1<<QMAT_SHIFT)){
4631  if(((unsigned)(level+threshold1))>threshold2){
4632  if(level>0){
4633  level= (bias + level)>>QMAT_SHIFT;
4634  block[j]= level;
4635  }else{
4636  level= (bias - level)>>QMAT_SHIFT;
4637  block[j]= -level;
4638  }
4639  max |=level;
4640  }else{
4641  block[j]=0;
4642  }
4643  }
4644  *overflow= s->max_qcoeff < max; //overflow might have happened
4645 
4646  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4647  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4648  ff_block_permute(block, s->idsp.idct_permutation,
4649  scantable, last_non_zero);
4650 
4651  return last_non_zero;
4652 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:88
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1304
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:341
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:681
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:82
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:149
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:234
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:427
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:286
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:261
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:100
level
uint8_t level
Definition: svq3.c:204
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:386
av_clip
#define av_clip
Definition: common.h:98
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3566
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:139
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:540
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:196
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
r
const char * r
Definition: vf_curves.c:126
acc
int acc
Definition: yuv2rgb.c:554
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:372
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:225
mem_internal.h
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:521
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:258
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1299
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1655
MAY_BE_MPEG12
#define MAY_BE_MPEG12
Definition: mpv_reconstruct_mb_template.c:24
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:137
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:96
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:607
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2510
matrix
Definition: vc1dsp.c:42
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:55
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2659
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4546
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4198
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:974
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:169
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:163
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2712
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:834
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1689
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:259
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:130
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:387
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:327
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:490
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:344
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:222
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:456
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
AVFrame::width
int width
Definition: frame.h:416
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:107
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:522
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:374
AVOption
AVOption.
Definition: opt.h:346
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:109
data
const char data[16]
Definition: mxf.c:148
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:202
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:214
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:114
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:815
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:845
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:386
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:540
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:39
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:85
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:481
sqr
static double sqr(double in)
Definition: af_afwtdn.c:871
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
Picture
Picture.
Definition: mpegpicture.h:46
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:100
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2693
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2095
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:140
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:962
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:98
wmv2enc.h
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1263
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
encode_picture
static int encode_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:3580
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:601
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:577
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MPEGVIDEO_MAX_PLANES
#define MPEGVIDEO_MAX_PLANES
Definition: mpegpicture.h:32
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:271
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:365
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:258
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:108
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:108
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:41
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:904
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1756
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:65
sp5x.h
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3528
FDCTDSPContext
Definition: fdctdsp.h:28
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:253
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:201
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:197
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:834
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:454
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3494
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:190
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:453
skip_check
static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
Definition: mpegvideo_enc.c:1263
fail
#define fail()
Definition: checkasm.h:179
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:139
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:105
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1008
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:502
perm
perm
Definition: f_perms.c:75
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1231
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:87
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:643
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:42
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:53
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:218
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:265
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:99
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:439
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:290
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2823
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:118
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:876
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:339
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:782
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1710
RateControlContext
rate control context.
Definition: ratecontrol.h:63
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:235
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2800
av_cold
#define av_cold
Definition: attributes.h:90
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:595
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:92
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1040
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4200
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:40
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1057
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:450
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:256
g
const char * g
Definition: vf_curves.c:127
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1494
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1574
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:219
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1292
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1527
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:269
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:855
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2737
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:77
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1277
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:380
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:390
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:484
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:53
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:140
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:679
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2780
run
uint8_t run
Definition: svq3.c:203
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:283
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:325
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:224
bias
static int bias(int x, int c)
Definition: vqcdec.c:114
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:342
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:34
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(Picture *dst, Picture *src)
Definition: mpegpicture.c:304
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:480
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:495
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:56
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:248
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:878
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:275
mathops.h
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3493
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:894
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:690
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:987
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:963
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:128
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1327
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1324
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:120
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1059
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1562
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:272
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:262
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:841
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:275
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3861
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1337
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:544
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:794
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1402
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1334
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2119
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3500
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:285
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:80
Picture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:80
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:523
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: vvc_intra.c:291
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1031
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:194
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1120
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:354
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2005
shift
static int shift(int a, int b)
Definition: bonk.c:262
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:295
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:594
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1065
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:106
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:197
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:326
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:281
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:94
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:86
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:41
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:112
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:279
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:521
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:260
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:264
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:494
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:528
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:284
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:815
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:87
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:67
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:53
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:191
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:138
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:451
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:219
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:299
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:246
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:455
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:94
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:449
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:515
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3885
MpegEncContext::encoding_error
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
Definition: mpegvideo.h:251
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1071
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2857
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1325
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:997
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:389
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:274
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:283
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:105
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:816
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:905
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:203
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:576
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2520
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:308
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:246
AVCodecContext::height
int height
Definition: avcodec.h:618
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:507
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:681
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:102
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo_enc.c:1094
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:290
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1379
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:110
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2587
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:105
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
Picture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:81
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1704
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:200
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:452
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:862
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:416
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:91
Picture::shared
int shared
Definition: mpegpicture.h:78
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:890
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1363
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:342
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:97
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1256
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1470
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:848
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1653
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:263
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:862
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:104
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:83
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:103
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:965
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:108
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:274
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:795
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:537
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:107
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:531
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1306
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
ff_set_cmp
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4219
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2548
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:257
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1047
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:964
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2061
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:983
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:53
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:409
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:389
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:79
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2638
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:343
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:865
h
h
Definition: vp9dsp_template.c:2038
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:856
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:143
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:40
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4571
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:409
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:67
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:169
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:642
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:340
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
pixblockdsp.h
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:104
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1602
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:978
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:448
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2760
intmath.h