FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/internal.h"
39 #include "libavutil/intmath.h"
40 #include "libavutil/mathematics.h"
41 #include "libavutil/mem_internal.h"
42 #include "libavutil/pixdesc.h"
43 #include "libavutil/opt.h"
44 #include "libavutil/thread.h"
45 #include "avcodec.h"
46 #include "dct.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include <limits.h>
79 #include "sp5x.h"
80 
81 #define QUANT_BIAS_SHIFT 8
82 
83 #define QMAT_SHIFT_MMX 16
84 #define QMAT_SHIFT 21
85 
87 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
88 static int sse_mb(MpegEncContext *s);
89 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
90 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
91 
92 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
93 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
94 
95 static const AVOption mpv_generic_options[] = {
98  { NULL },
99 };
100 
102  .class_name = "generic mpegvideo encoder",
103  .item_name = av_default_item_name,
104  .option = mpv_generic_options,
105  .version = LIBAVUTIL_VERSION_INT,
106 };
107 
/**
 * Precompute the quantization multiplier tables for a range of qscales.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale] (and, in the
 * generic branch, the 16-bit pair table qmat16[qscale]) with fixed-point
 * reciprocals of qscale2 * quant_matrix[], permuted through
 * s->idsp.idct_permutation. The scaling differs per FDCT implementation:
 * the ifast FDCT folds ff_aanscales[] into the denominator and uses
 * QMAT_SHIFT + 14, the others use QMAT_SHIFT / QMAT_SHIFT_MMX.
 * qscale2 is the doubled linear qscale or the MPEG-2 non-linear mapping
 * depending on s->q_scale_type. Finally a worst-case overflow check is
 * done over coefficients [intra, 64) and a warning is logged if the
 * chosen QMAT_SHIFT could overflow INT_MAX.
 *
 * NOTE(review): this extract is missing one source line (original line
 * 128, the tail of the first if-condition) — the text below is kept
 * verbatim, gap included.
 */
108 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
109  uint16_t (*qmat16)[2][64],
110  const uint16_t *quant_matrix,
111  int bias, int qmin, int qmax, int intra)
112 {
113  FDCTDSPContext *fdsp = &s->fdsp;
114  int qscale;
115  int shift = 0;
116 
117  for (qscale = qmin; qscale <= qmax; qscale++) {
118  int i;
119  int qscale2;
120 
 /* qscale2 is the "real" quantizer step: non-linear MPEG-2 table or 2*qscale. */
121  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
122  else qscale2 = qscale << 1;
123 
124  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
125 #if CONFIG_FAANDCT
126  fdsp->fdct == ff_faandct ||
127 #endif /* CONFIG_FAANDCT */
129  for (i = 0; i < 64; i++) {
130  const int j = s->idsp.idct_permutation[i];
131  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 /* NOTE(review): the range comment below mentions ff_aanscales, but in
  * this branch den has no aanscale factor — it appears copied from the
  * ifast branch; verify against upstream. */
132  /* 16 <= qscale * quant_matrix[i] <= 7905
133  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
134  * 19952 <= x <= 249205026
135  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
136  * 3444240 >= (1 << 36) / (x) >= 275 */
137 
138  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
139  }
140  } else if (fdsp->fdct == ff_fdct_ifast) {
 /* ifast FDCT leaves AAN scale factors in its output, so fold
  * ff_aanscales[] into the denominator and shift 14 bits more. */
141  for (i = 0; i < 64; i++) {
142  const int j = s->idsp.idct_permutation[i];
143  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
144  /* 16 <= qscale * quant_matrix[i] <= 7905
145  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
146  * 19952 <= x <= 249205026
147  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
148  * 3444240 >= (1 << 36) / (x) >= 275 */
149 
150  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
151  }
152  } else {
 /* Generic path: also build the 16-bit multiplier/bias pair used by
  * the SIMD quantizers (qmat16[qscale][0] = multiplier, [1] = bias). */
153  for (i = 0; i < 64; i++) {
154  const int j = s->idsp.idct_permutation[i];
155  int64_t den = (int64_t) qscale2 * quant_matrix[j];
156  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
157  * Assume x = qscale * quant_matrix[i]
158  * So 16 <= x <= 7905
159  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
160  * so 32768 >= (1 << 19) / (x) >= 67 */
161  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
162  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
163  // (qscale * quant_matrix[i]);
164  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
165 
 /* Clamp: 0 would make the quantizer a no-op, 128*256 does not fit
  * the 16-bit signed multiply convention. */
166  if (qmat16[qscale][0][i] == 0 ||
167  qmat16[qscale][0][i] == 128 * 256)
168  qmat16[qscale][0][i] = 128 * 256 - 1;
169  qmat16[qscale][1][i] =
170  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
171  qmat16[qscale][0][i]);
172  }
173  }
174 
 /* Worst-case overflow probe: starting at 'intra' skips the DC
  * coefficient for intra matrices. */
175  for (i = intra; i < 64; i++) {
176  int64_t max = 8191;
177  if (fdsp->fdct == ff_fdct_ifast) {
178  max = (8191LL * ff_aanscales[i]) >> 14;
179  }
180  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
181  shift++;
182  }
183  }
184  }
185  if (shift) {
186  av_log(s->avctx, AV_LOG_INFO,
187  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
188  QMAT_SHIFT - shift);
189  }
190 }
191 
/**
 * Derive s->qscale (and s->lambda2) from the current s->lambda.
 *
 * The linear path maps lambda to qscale with the empirical factor 139 and
 * clips to [qmin, qmax] (qmax is relaxed to 31 while VBV is ignoring qmax).
 *
 * NOTE(review): the first branch is explicitly disabled ("&& 0"), so the
 * non-linear-qscale search is dead code; the else branch always runs.
 * NOTE(review): one source line (original line 217, the shift amount of
 * the lambda2 computation) is missing from this extract.
 */
192 static inline void update_qscale(MpegEncContext *s)
193 {
194  if (s->q_scale_type == 1 && 0) {
195  int i;
196  int bestdiff=INT_MAX;
197  int best = 1;
198 
 /* Search the MPEG-2 non-linear table for the entry closest to
  * lambda * 139, honoring qmin/qmax (unless VBV overrides qmax). */
199  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
200  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
201  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
202  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
203  continue;
204  if (diff < bestdiff) {
205  bestdiff = diff;
206  best = i;
207  }
208  }
209  s->qscale = best;
210  } else {
 /* Rounded fixed-point lambda -> qscale conversion, then clip. */
211  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
212  (FF_LAMBDA_SHIFT + 7);
213  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
214  }
215 
216  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
218 }
219 
221 {
222  int i;
223 
224  if (matrix) {
225  put_bits(pb, 1, 1);
226  for (i = 0; i < 64; i++) {
228  }
229  } else
230  put_bits(pb, 1, 0);
231 }
232 
233 /**
234  * init s->current_picture.qscale_table from s->lambda_table
235  */
237 {
238  int8_t * const qscale_table = s->current_picture.qscale_table;
239  int i;
240 
241  for (i = 0; i < s->mb_num; i++) {
242  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
243  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
244  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
245  s->avctx->qmax);
246  }
247 }
248 
250  const MpegEncContext *src)
251 {
252 #define COPY(a) dst->a= src->a
253  COPY(pict_type);
255  COPY(f_code);
256  COPY(b_code);
257  COPY(qscale);
258  COPY(lambda);
259  COPY(lambda2);
260  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
261  COPY(progressive_frame); // FIXME don't set in encode_header
262  COPY(partitioned_frame); // FIXME don't set in encode_header
263 #undef COPY
264 }
265 
266 static void mpv_encode_init_static(void)
267 {
268  for (int i = -16; i < 16; i++)
269  default_fcode_tab[i + MAX_MV] = 1;
270 }
271 
272 /**
273  * Set the given MpegEncContext to defaults for encoding.
274  * the changed fields will not depend upon the prior state of the MpegEncContext.
275  */
277 {
278  static AVOnce init_static_once = AV_ONCE_INIT;
279 
281 
282  ff_thread_once(&init_static_once, mpv_encode_init_static);
283 
284  s->me.mv_penalty = default_mv_penalty;
285  s->fcode_tab = default_fcode_tab;
286 
287  s->input_picture_number = 0;
288  s->picture_in_gop_number = 0;
289 }
290 
292 {
293 #if ARCH_X86
295 #endif
296 
297  if (CONFIG_H263_ENCODER)
298  ff_h263dsp_init(&s->h263dsp);
299  if (!s->dct_quantize)
300  s->dct_quantize = ff_dct_quantize_c;
301  if (!s->denoise_dct)
302  s->denoise_dct = denoise_dct_c;
303  s->fast_dct_quantize = s->dct_quantize;
304  if (s->avctx->trellis)
305  s->dct_quantize = dct_quantize_trellis_c;
306 
307  return 0;
308 }
309 
310 /* init video encoder */
312 {
314  AVCPBProperties *cpb_props;
315  int i, ret;
316 
318 
319  switch (avctx->pix_fmt) {
320  case AV_PIX_FMT_YUVJ444P:
321  case AV_PIX_FMT_YUV444P:
322  s->chroma_format = CHROMA_444;
323  break;
324  case AV_PIX_FMT_YUVJ422P:
325  case AV_PIX_FMT_YUV422P:
326  s->chroma_format = CHROMA_422;
327  break;
328  case AV_PIX_FMT_YUVJ420P:
329  case AV_PIX_FMT_YUV420P:
330  default:
331  s->chroma_format = CHROMA_420;
332  break;
333  }
334 
336 
337  s->bit_rate = avctx->bit_rate;
338  s->width = avctx->width;
339  s->height = avctx->height;
340  if (avctx->gop_size > 600 &&
343  "keyframe interval too large!, reducing it from %d to %d\n",
344  avctx->gop_size, 600);
345  avctx->gop_size = 600;
346  }
347  s->gop_size = avctx->gop_size;
348  s->avctx = avctx;
350  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
351  "is %d.\n", MAX_B_FRAMES);
353  } else if (avctx->max_b_frames < 0) {
355  "max b frames must be 0 or positive for mpegvideo based encoders\n");
356  return AVERROR(EINVAL);
357  }
358  s->max_b_frames = avctx->max_b_frames;
359  s->codec_id = avctx->codec->id;
360  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
361  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
362  return AVERROR(EINVAL);
363  }
364 
365  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
366  s->rtp_mode = !!s->rtp_payload_size;
367  s->intra_dc_precision = avctx->intra_dc_precision;
368 
369  // workaround some differences between how applications specify dc precision
370  if (s->intra_dc_precision < 0) {
371  s->intra_dc_precision += 8;
372  } else if (s->intra_dc_precision >= 8)
373  s->intra_dc_precision -= 8;
374 
375  if (s->intra_dc_precision < 0) {
377  "intra dc precision must be positive, note some applications use"
378  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
379  return AVERROR(EINVAL);
380  }
381 
382  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
383  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
384  return AVERROR(EINVAL);
385  }
386  s->user_specified_pts = AV_NOPTS_VALUE;
387 
388  if (s->gop_size <= 1) {
389  s->intra_only = 1;
390  s->gop_size = 12;
391  } else {
392  s->intra_only = 0;
393  }
394 
395  /* Fixed QSCALE */
396  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
397 
398  s->adaptive_quant = (avctx->lumi_masking ||
399  avctx->dark_masking ||
402  avctx->p_masking ||
403  s->border_masking ||
404  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
405  !s->fixed_qscale;
406 
407  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
408 
410  switch(avctx->codec_id) {
413  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
414  break;
415  case AV_CODEC_ID_MPEG4:
419  if (avctx->rc_max_rate >= 15000000) {
420  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
421  } else if(avctx->rc_max_rate >= 2000000) {
422  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
423  } else if(avctx->rc_max_rate >= 384000) {
424  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
425  } else
426  avctx->rc_buffer_size = 40;
427  avctx->rc_buffer_size *= 16384;
428  break;
429  }
430  if (avctx->rc_buffer_size) {
431  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
432  }
433  }
434 
435  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
436  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
437  return AVERROR(EINVAL);
438  }
439 
442  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
443  }
444 
446  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
447  return AVERROR(EINVAL);
448  }
449 
451  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
452  return AVERROR(EINVAL);
453  }
454 
455  if (avctx->rc_max_rate &&
459  "impossible bitrate constraints, this will fail\n");
460  }
461 
462  if (avctx->rc_buffer_size &&
463  avctx->bit_rate * (int64_t)avctx->time_base.num >
464  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
465  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
466  return AVERROR(EINVAL);
467  }
468 
469  if (!s->fixed_qscale &&
472  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
474  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
475  if (nbt <= INT_MAX) {
476  avctx->bit_rate_tolerance = nbt;
477  } else
478  avctx->bit_rate_tolerance = INT_MAX;
479  }
480 
481  if (avctx->rc_max_rate &&
483  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
484  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
485  90000LL * (avctx->rc_buffer_size - 1) >
486  avctx->rc_max_rate * 0xFFFFLL) {
488  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
489  "specified vbv buffer is too large for the given bitrate!\n");
490  }
491 
492  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
493  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
494  s->codec_id != AV_CODEC_ID_FLV1) {
495  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
496  return AVERROR(EINVAL);
497  }
498 
499  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
501  "OBMC is only supported with simple mb decision\n");
502  return AVERROR(EINVAL);
503  }
504 
505  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
506  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
507  return AVERROR(EINVAL);
508  }
509 
510  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
511  s->codec_id == AV_CODEC_ID_H263 ||
512  s->codec_id == AV_CODEC_ID_H263P) &&
513  (avctx->sample_aspect_ratio.num > 255 ||
514  avctx->sample_aspect_ratio.den > 255)) {
516  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
520  }
521 
522  if ((s->codec_id == AV_CODEC_ID_H263 ||
523  s->codec_id == AV_CODEC_ID_H263P) &&
524  (avctx->width > 2048 ||
525  avctx->height > 1152 )) {
526  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
527  return AVERROR(EINVAL);
528  }
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P ||
531  s->codec_id == AV_CODEC_ID_RV20) &&
532  ((avctx->width &3) ||
533  (avctx->height&3) )) {
534  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
535  return AVERROR(EINVAL);
536  }
537 
538  if (s->codec_id == AV_CODEC_ID_RV10 &&
539  (avctx->width &15 ||
540  avctx->height&15 )) {
541  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
546  s->codec_id == AV_CODEC_ID_WMV2) &&
547  avctx->width & 1) {
548  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
549  return AVERROR(EINVAL);
550  }
551 
553  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557 
558  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
559  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
565  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
566  return AVERROR(EINVAL);
567  }
568 
569  if (s->scenechange_threshold < 1000000000 &&
572  "closed gop with scene change detection are not supported yet, "
573  "set threshold to 1000000000\n");
574  return AVERROR_PATCHWELCOME;
575  }
576 
578  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
581  "low delay forcing is only available for mpeg2, "
582  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
583  return AVERROR(EINVAL);
584  }
585  if (s->max_b_frames != 0) {
587  "B-frames cannot be used with low delay\n");
588  return AVERROR(EINVAL);
589  }
590  }
591 
592  if (s->q_scale_type == 1) {
593  if (avctx->qmax > 28) {
595  "non linear quant only supports qmax <= 28 currently\n");
596  return AVERROR_PATCHWELCOME;
597  }
598  }
599 
600  if (avctx->slices > 1 &&
602  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
603  return AVERROR(EINVAL);
604  }
605 
606  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
608  "notice: b_frame_strategy only affects the first pass\n");
609  s->b_frame_strategy = 0;
610  }
611 
613  if (i > 1) {
614  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
615  avctx->time_base.den /= i;
616  avctx->time_base.num /= i;
617  //return -1;
618  }
619 
620  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
621  // (a + x * 3 / 8) / x
622  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
623  s->inter_quant_bias = 0;
624  } else {
625  s->intra_quant_bias = 0;
626  // (a - x / 4) / x
627  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
628  }
629 
630  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
631  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
632  return AVERROR(EINVAL);
633  }
634 
635  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
636 
637  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
638  avctx->time_base.den > (1 << 16) - 1) {
640  "timebase %d/%d not supported by MPEG 4 standard, "
641  "the maximum admitted value for the timebase denominator "
642  "is %d\n", avctx->time_base.num, avctx->time_base.den,
643  (1 << 16) - 1);
644  return AVERROR(EINVAL);
645  }
646  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
647 
648  switch (avctx->codec->id) {
650  s->out_format = FMT_MPEG1;
651  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
652  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
653  break;
655  s->out_format = FMT_MPEG1;
656  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
657  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
658  s->rtp_mode = 1;
659  break;
660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
661  case AV_CODEC_ID_MJPEG:
662  case AV_CODEC_ID_AMV:
663  s->out_format = FMT_MJPEG;
664  s->intra_only = 1; /* force intra only for jpeg */
665  if ((ret = ff_mjpeg_encode_init(s)) < 0)
666  return ret;
667  avctx->delay = 0;
668  s->low_delay = 1;
669  break;
670 #endif
671  case AV_CODEC_ID_SPEEDHQ:
672  s->out_format = FMT_SPEEDHQ;
673  s->intra_only = 1; /* force intra only for SHQ */
674  if (!CONFIG_SPEEDHQ_ENCODER)
676  if ((ret = ff_speedhq_encode_init(s)) < 0)
677  return ret;
678  avctx->delay = 0;
679  s->low_delay = 1;
680  break;
681  case AV_CODEC_ID_H261:
682  if (!CONFIG_H261_ENCODER)
684  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
686  "The specified picture size of %dx%d is not valid for the "
687  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
688  s->width, s->height);
689  return AVERROR(EINVAL);
690  }
691  s->out_format = FMT_H261;
692  avctx->delay = 0;
693  s->low_delay = 1;
694  s->rtp_mode = 0; /* Sliced encoding not supported */
695  break;
696  case AV_CODEC_ID_H263:
697  if (!CONFIG_H263_ENCODER)
700  s->width, s->height) == 8) {
702  "The specified picture size of %dx%d is not valid for "
703  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
704  "352x288, 704x576, and 1408x1152. "
705  "Try H.263+.\n", s->width, s->height);
706  return AVERROR(EINVAL);
707  }
708  s->out_format = FMT_H263;
709  avctx->delay = 0;
710  s->low_delay = 1;
711  break;
712  case AV_CODEC_ID_H263P:
713  s->out_format = FMT_H263;
714  s->h263_plus = 1;
715  /* Fx */
716  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
717  s->modified_quant = s->h263_aic;
718  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
719  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
720 
721  /* /Fx */
722  /* These are just to be sure */
723  avctx->delay = 0;
724  s->low_delay = 1;
725  break;
726  case AV_CODEC_ID_FLV1:
727  s->out_format = FMT_H263;
728  s->h263_flv = 2; /* format = 1; 11-bit codes */
729  s->unrestricted_mv = 1;
730  s->rtp_mode = 0; /* don't allow GOB */
731  avctx->delay = 0;
732  s->low_delay = 1;
733  break;
734  case AV_CODEC_ID_RV10:
735  s->out_format = FMT_H263;
736  avctx->delay = 0;
737  s->low_delay = 1;
738  break;
739  case AV_CODEC_ID_RV20:
740  s->out_format = FMT_H263;
741  avctx->delay = 0;
742  s->low_delay = 1;
743  s->modified_quant = 1;
744  s->h263_aic = 1;
745  s->h263_plus = 1;
746  s->loop_filter = 1;
747  s->unrestricted_mv = 0;
748  break;
749  case AV_CODEC_ID_MPEG4:
750  s->out_format = FMT_H263;
751  s->h263_pred = 1;
752  s->unrestricted_mv = 1;
753  s->low_delay = s->max_b_frames ? 0 : 1;
754  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
755  break;
757  s->out_format = FMT_H263;
758  s->h263_pred = 1;
759  s->unrestricted_mv = 1;
760  s->msmpeg4_version = 2;
761  avctx->delay = 0;
762  s->low_delay = 1;
763  break;
765  s->out_format = FMT_H263;
766  s->h263_pred = 1;
767  s->unrestricted_mv = 1;
768  s->msmpeg4_version = 3;
769  s->flipflop_rounding = 1;
770  avctx->delay = 0;
771  s->low_delay = 1;
772  break;
773  case AV_CODEC_ID_WMV1:
774  s->out_format = FMT_H263;
775  s->h263_pred = 1;
776  s->unrestricted_mv = 1;
777  s->msmpeg4_version = 4;
778  s->flipflop_rounding = 1;
779  avctx->delay = 0;
780  s->low_delay = 1;
781  break;
782  case AV_CODEC_ID_WMV2:
783  s->out_format = FMT_H263;
784  s->h263_pred = 1;
785  s->unrestricted_mv = 1;
786  s->msmpeg4_version = 5;
787  s->flipflop_rounding = 1;
788  avctx->delay = 0;
789  s->low_delay = 1;
790  break;
791  default:
792  return AVERROR(EINVAL);
793  }
794 
795  avctx->has_b_frames = !s->low_delay;
796 
797  s->encoding = 1;
798 
799  s->progressive_frame =
800  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
802  s->alternate_scan);
803 
804  /* init */
806  if ((ret = ff_mpv_common_init(s)) < 0)
807  return ret;
808 
809  ff_fdctdsp_init(&s->fdsp, avctx);
810  ff_me_cmp_init(&s->mecc, avctx);
811  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
812  ff_pixblockdsp_init(&s->pdsp, avctx);
813  ff_qpeldsp_init(&s->qdsp);
814 
815  if (!(avctx->stats_out = av_mallocz(256)) ||
816  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
817  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
818  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
819  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
820  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
821  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
822  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
823  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
824  return AVERROR(ENOMEM);
825 
826  if (s->noise_reduction) {
827  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
828  return AVERROR(ENOMEM);
829  }
830 
832 
833  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
834  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
835 
836  if (s->slice_context_count > 1) {
837  s->rtp_mode = 1;
838 
840  s->h263_slice_structured = 1;
841  }
842 
843  s->quant_precision = 5;
844 
845  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
846  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
847 
848  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261) {
850  } else if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
851  && s->out_format == FMT_MPEG1) {
853  } else if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
855  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
857  }
858 
859  /* init q matrix */
860  for (i = 0; i < 64; i++) {
861  int j = s->idsp.idct_permutation[i];
862  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
863  s->mpeg_quant) {
864  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
865  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
866  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
867  s->intra_matrix[j] =
868  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
869  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
870  s->intra_matrix[j] =
871  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
872  } else {
873  /* MPEG-1/2 */
874  s->chroma_intra_matrix[j] =
875  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
876  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
877  }
878  if (avctx->intra_matrix)
879  s->intra_matrix[j] = avctx->intra_matrix[i];
880  if (avctx->inter_matrix)
881  s->inter_matrix[j] = avctx->inter_matrix[i];
882  }
883 
884  /* precompute matrix */
885  /* for mjpeg, we do include qscale in the matrix */
886  if (s->out_format != FMT_MJPEG) {
887  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
888  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
889  31, 1);
890  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
891  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
892  31, 0);
893  }
894 
895  if ((ret = ff_rate_control_init(s)) < 0)
896  return ret;
897 
898  if (s->b_frame_strategy == 2) {
899  for (i = 0; i < s->max_b_frames + 2; i++) {
900  s->tmp_frames[i] = av_frame_alloc();
901  if (!s->tmp_frames[i])
902  return AVERROR(ENOMEM);
903 
904  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
905  s->tmp_frames[i]->width = s->width >> s->brd_scale;
906  s->tmp_frames[i]->height = s->height >> s->brd_scale;
907 
908  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
909  if (ret < 0)
910  return ret;
911  }
912  }
913 
914  cpb_props = ff_add_cpb_side_data(avctx);
915  if (!cpb_props)
916  return AVERROR(ENOMEM);
917  cpb_props->max_bitrate = avctx->rc_max_rate;
918  cpb_props->min_bitrate = avctx->rc_min_rate;
919  cpb_props->avg_bitrate = avctx->bit_rate;
920  cpb_props->buffer_size = avctx->rc_buffer_size;
921 
922  return 0;
923 }
924 
926 {
928  int i;
929 
931 
933 
934  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
935  av_frame_free(&s->tmp_frames[i]);
936 
937  av_frame_free(&s->new_picture);
938 
940 
941  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
942  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
943  s->q_chroma_intra_matrix= NULL;
944  s->q_chroma_intra_matrix16= NULL;
945  av_freep(&s->q_intra_matrix);
946  av_freep(&s->q_inter_matrix);
947  av_freep(&s->q_intra_matrix16);
948  av_freep(&s->q_inter_matrix16);
949  av_freep(&s->input_picture);
950  av_freep(&s->reordered_input_picture);
951  av_freep(&s->dct_offset);
952 
953  return 0;
954 }
955 
/**
 * Sum of absolute errors of a 16x16 block against a constant value.
 *
 * @param src    top-left sample of the 16x16 block
 * @param ref    constant reference each sample is compared with
 * @param stride distance in bytes between the starts of two rows
 * @return the sum over all 256 samples of |sample - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;

        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;

            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
969 
970 static int get_intra_count(MpegEncContext *s, uint8_t *src,
971  uint8_t *ref, int stride)
972 {
973  int x, y, w, h;
974  int acc = 0;
975 
976  w = s->width & ~15;
977  h = s->height & ~15;
978 
979  for (y = 0; y < h; y += 16) {
980  for (x = 0; x < w; x += 16) {
981  int offset = x + y * stride;
982  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
983  stride, 16);
984  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
985  int sae = get_sae(src + offset, mean, stride);
986 
987  acc += sae + 500 < sad;
988  }
989  }
990  return acc;
991 }
992 
993 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
994 {
995  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
996  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
997  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
998  &s->linesize, &s->uvlinesize);
999 }
1000 
/**
 * Queue one user-supplied frame (or a flush request) for encoding.
 *
 * With a frame: validates/derives its pts (must be strictly increasing),
 * decides whether the frame buffer can be referenced directly or must be
 * copied into an internal Picture (copying also pads the bottom edge),
 * and appends it at position 'encoding_delay' of s->input_picture after
 * shifting the queue. With pic_arg == NULL: flush mode — compacts the
 * queue so s->input_picture[0] holds the oldest pending picture.
 *
 * @return 0 on success, a negative error code on failure
 */
1001 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1002 {
1003  Picture *pic = NULL;
1004  int64_t pts;
1005  int i, display_picture_number = 0, ret;
 /* How many queued frames the encoder lags behind the input. */
1006  int encoding_delay = s->max_b_frames ? s->max_b_frames
1007  : (s->low_delay ? 0 : 1);
1008  int flush_offset = 1;
1009  int direct = 1;
1010 
1011  if (pic_arg) {
1012  pts = pic_arg->pts;
1013  display_picture_number = s->input_picture_number++;
1014 
 /* Enforce monotonically increasing pts; with no pts, continue the
  * previous one or fall back to the display number. */
1015  if (pts != AV_NOPTS_VALUE) {
1016  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1017  int64_t last = s->user_specified_pts;
1018 
1019  if (pts <= last) {
1020  av_log(s->avctx, AV_LOG_ERROR,
1021  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1022  pts, last);
1023  return AVERROR(EINVAL);
1024  }
1025 
1026  if (!s->low_delay && display_picture_number == 1)
1027  s->dts_delta = pts - last;
1028  }
1029  s->user_specified_pts = pts;
1030  } else {
1031  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1032  s->user_specified_pts =
1033  pts = s->user_specified_pts + 1;
1034  av_log(s->avctx, AV_LOG_INFO,
1035  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1036  pts);
1037  } else {
1038  pts = display_picture_number;
1039  }
1040  }
1041 
 /* Direct rendering is only possible when the input buffer exists,
  * strides match, dimensions are MB-aligned and data/linesize meet
  * the STRIDE_ALIGN alignment requirement. */
1042  if (!pic_arg->buf[0] ||
1043  pic_arg->linesize[0] != s->linesize ||
1044  pic_arg->linesize[1] != s->uvlinesize ||
1045  pic_arg->linesize[2] != s->uvlinesize)
1046  direct = 0;
1047  if ((s->width & 15) || (s->height & 15))
1048  direct = 0;
1049  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1050  direct = 0;
1051  if (s->linesize & (STRIDE_ALIGN-1))
1052  direct = 0;
1053 
1054  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1055  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1056 
1057  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1058  if (i < 0)
1059  return i;
1060 
1061  pic = &s->picture[i];
1062  pic->reference = 3;
1063 
1064  if (direct) {
1065  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1066  return ret;
1067  }
1068  ret = alloc_picture(s, pic, direct);
1069  if (ret < 0)
1070  return ret;
1071 
1072  if (!direct) {
 /* If the user data already lives at the in-place offset of our
  * buffers, no copy is needed. */
1073  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1074  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1075  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1076  // empty
1077  } else {
1078  int h_chroma_shift, v_chroma_shift;
1079  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1080  &h_chroma_shift,
1081  &v_chroma_shift);
1082 
 /* Copy all three planes, respecting chroma subsampling. */
1083  for (i = 0; i < 3; i++) {
1084  int src_stride = pic_arg->linesize[i];
1085  int dst_stride = i ? s->uvlinesize : s->linesize;
1086  int h_shift = i ? h_chroma_shift : 0;
1087  int v_shift = i ? v_chroma_shift : 0;
1088  int w = s->width >> h_shift;
1089  int h = s->height >> v_shift;
1090  uint8_t *src = pic_arg->data[i];
1091  uint8_t *dst = pic->f->data[i];
1092  int vpad = 16;
1093 
 /* Interlaced MPEG-2 may need 32-line bottom padding. */
1094  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1095  && !s->progressive_sequence
1096  && FFALIGN(s->height, 32) - s->height > 16)
1097  vpad = 32;
1098 
1099  if (!s->avctx->rc_buffer_size)
1100  dst += INPLACE_OFFSET;
1101 
 /* Equal strides allow one bulk copy; otherwise copy row by row. */
1102  if (src_stride == dst_stride)
1103  memcpy(dst, src, src_stride * h);
1104  else {
1105  int h2 = h;
1106  uint8_t *dst2 = dst;
1107  while (h2--) {
1108  memcpy(dst2, src, w);
1109  dst2 += dst_stride;
1110  src += src_stride;
1111  }
1112  }
 /* Extend the right/bottom edges when dimensions are not aligned. */
1113  if ((s->width & 15) || (s->height & (vpad-1))) {
1114  s->mpvencdsp.draw_edges(dst, dst_stride,
1115  w, h,
1116  16 >> h_shift,
1117  vpad >> v_shift,
1118  EDGE_BOTTOM);
1119  }
1120  }
1121  emms_c();
1122  }
1123  }
1124  ret = av_frame_copy_props(pic->f, pic_arg);
1125  if (ret < 0)
1126  return ret;
1127 
1128  pic->f->display_picture_number = display_picture_number;
1129  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1130  } else {
1131  /* Flushing: When we have not received enough input frames,
1132  * ensure s->input_picture[0] contains the first picture */
1133  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1134  if (s->input_picture[flush_offset])
1135  break;
1136 
1137  if (flush_offset <= 1)
1138  flush_offset = 1;
1139  else
1140  encoding_delay = encoding_delay - flush_offset + 1;
1141  }
1142 
1143  /* shift buffer entries */
1144  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1145  s->input_picture[i - flush_offset] = s->input_picture[i];
1146 
 /* pic is NULL in flush mode, terminating the queue. */
1147  s->input_picture[encoding_delay] = (Picture*) pic;
1148 
1149  return 0;
1150 }
1151 
1153 {
1154  int x, y, plane;
1155  int score = 0;
1156  int64_t score64 = 0;
1157 
1158  for (plane = 0; plane < 3; plane++) {
1159  const int stride = p->f->linesize[plane];
1160  const int bw = plane ? 1 : 2;
1161  for (y = 0; y < s->mb_height * bw; y++) {
1162  for (x = 0; x < s->mb_width * bw; x++) {
1163  int off = p->shared ? 0 : 16;
1164  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1165  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1166  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1167 
1168  switch (FFABS(s->frame_skip_exp)) {
1169  case 0: score = FFMAX(score, v); break;
1170  case 1: score += FFABS(v); break;
1171  case 2: score64 += v * (int64_t)v; break;
1172  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1173  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1174  }
1175  }
1176  }
1177  }
1178  emms_c();
1179 
1180  if (score)
1181  score64 = score;
1182  if (s->frame_skip_exp < 0)
1183  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1184  -1.0/s->frame_skip_exp);
1185 
1186  if (score64 < s->frame_skip_threshold)
1187  return 1;
1188  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1189  return 1;
1190  return 0;
1191 }
1192 
1194 {
1195  int ret;
1196  int size = 0;
1197 
1199  if (ret < 0)
1200  return ret;
1201 
1202  do {
1204  if (ret >= 0) {
1205  size += pkt->size;
1207  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1208  return ret;
1209  } while (ret >= 0);
1210 
1211  return size;
1212 }
1213 
1215 {
1216  AVPacket *pkt;
1217  const int scale = s->brd_scale;
1218  int width = s->width >> scale;
1219  int height = s->height >> scale;
1220  int i, j, out_size, p_lambda, b_lambda, lambda2;
1221  int64_t best_rd = INT64_MAX;
1222  int best_b_count = -1;
1223  int ret = 0;
1224 
1225  av_assert0(scale >= 0 && scale <= 3);
1226 
1227  pkt = av_packet_alloc();
1228  if (!pkt)
1229  return AVERROR(ENOMEM);
1230 
1231  //emms_c();
1232  //s->next_picture_ptr->quality;
1233  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1234  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1235  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1236  if (!b_lambda) // FIXME we should do this somewhere else
1237  b_lambda = p_lambda;
1238  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1240 
1241  for (i = 0; i < s->max_b_frames + 2; i++) {
1242  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1243  s->next_picture_ptr;
1244  uint8_t *data[4];
1245 
1246  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1247  pre_input = *pre_input_ptr;
1248  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1249 
1250  if (!pre_input.shared && i) {
1251  data[0] += INPLACE_OFFSET;
1252  data[1] += INPLACE_OFFSET;
1253  data[2] += INPLACE_OFFSET;
1254  }
1255 
1256  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1257  s->tmp_frames[i]->linesize[0],
1258  data[0],
1259  pre_input.f->linesize[0],
1260  width, height);
1261  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1262  s->tmp_frames[i]->linesize[1],
1263  data[1],
1264  pre_input.f->linesize[1],
1265  width >> 1, height >> 1);
1266  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1267  s->tmp_frames[i]->linesize[2],
1268  data[2],
1269  pre_input.f->linesize[2],
1270  width >> 1, height >> 1);
1271  }
1272  }
1273 
1274  for (j = 0; j < s->max_b_frames + 1; j++) {
1275  AVCodecContext *c;
1276  int64_t rd = 0;
1277 
1278  if (!s->input_picture[j])
1279  break;
1280 
1282  if (!c) {
1283  ret = AVERROR(ENOMEM);
1284  goto fail;
1285  }
1286 
1287  c->width = width;
1288  c->height = height;
1290  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1291  c->mb_decision = s->avctx->mb_decision;
1292  c->me_cmp = s->avctx->me_cmp;
1293  c->mb_cmp = s->avctx->mb_cmp;
1294  c->me_sub_cmp = s->avctx->me_sub_cmp;
1295  c->pix_fmt = AV_PIX_FMT_YUV420P;
1296  c->time_base = s->avctx->time_base;
1297  c->max_b_frames = s->max_b_frames;
1298 
1299  ret = avcodec_open2(c, s->avctx->codec, NULL);
1300  if (ret < 0)
1301  goto fail;
1302 
1303 
1304  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1305  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1306 
1307  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1308  if (out_size < 0) {
1309  ret = out_size;
1310  goto fail;
1311  }
1312 
1313  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1314 
1315  for (i = 0; i < s->max_b_frames + 1; i++) {
1316  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1317 
1318  s->tmp_frames[i + 1]->pict_type = is_p ?
1320  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1321 
1322  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1323  if (out_size < 0) {
1324  ret = out_size;
1325  goto fail;
1326  }
1327 
1328  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1329  }
1330 
1331  /* get the delayed frames */
1333  if (out_size < 0) {
1334  ret = out_size;
1335  goto fail;
1336  }
1337  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1338 
1339  rd += c->error[0] + c->error[1] + c->error[2];
1340 
1341  if (rd < best_rd) {
1342  best_rd = rd;
1343  best_b_count = j;
1344  }
1345 
1346 fail:
1349  if (ret < 0) {
1350  best_b_count = ret;
1351  break;
1352  }
1353  }
1354 
1355  av_packet_free(&pkt);
1356 
1357  return best_b_count;
1358 }
1359 
1361 {
1362  int i, ret;
1363 
1364  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1365  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1366  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1367 
1368  /* set next picture type & ordering */
1369  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1370  if (s->frame_skip_threshold || s->frame_skip_factor) {
1371  if (s->picture_in_gop_number < s->gop_size &&
1372  s->next_picture_ptr &&
1373  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1374  // FIXME check that the gop check above is +-1 correct
1375  av_frame_unref(s->input_picture[0]->f);
1376 
1377  ff_vbv_update(s, 0);
1378 
1379  goto no_output_pic;
1380  }
1381  }
1382 
1383  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1384  !s->next_picture_ptr || s->intra_only) {
1385  s->reordered_input_picture[0] = s->input_picture[0];
1386  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1387  s->reordered_input_picture[0]->f->coded_picture_number =
1388  s->coded_picture_number++;
1389  } else {
1390  int b_frames = 0;
1391 
1392  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1393  for (i = 0; i < s->max_b_frames + 1; i++) {
1394  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1395 
1396  if (pict_num >= s->rc_context.num_entries)
1397  break;
1398  if (!s->input_picture[i]) {
1399  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1400  break;
1401  }
1402 
1403  s->input_picture[i]->f->pict_type =
1404  s->rc_context.entry[pict_num].new_pict_type;
1405  }
1406  }
1407 
1408  if (s->b_frame_strategy == 0) {
1409  b_frames = s->max_b_frames;
1410  while (b_frames && !s->input_picture[b_frames])
1411  b_frames--;
1412  } else if (s->b_frame_strategy == 1) {
1413  for (i = 1; i < s->max_b_frames + 1; i++) {
1414  if (s->input_picture[i] &&
1415  s->input_picture[i]->b_frame_score == 0) {
1416  s->input_picture[i]->b_frame_score =
1418  s->input_picture[i ]->f->data[0],
1419  s->input_picture[i - 1]->f->data[0],
1420  s->linesize) + 1;
1421  }
1422  }
1423  for (i = 0; i < s->max_b_frames + 1; i++) {
1424  if (!s->input_picture[i] ||
1425  s->input_picture[i]->b_frame_score - 1 >
1426  s->mb_num / s->b_sensitivity)
1427  break;
1428  }
1429 
1430  b_frames = FFMAX(0, i - 1);
1431 
1432  /* reset scores */
1433  for (i = 0; i < b_frames + 1; i++) {
1434  s->input_picture[i]->b_frame_score = 0;
1435  }
1436  } else if (s->b_frame_strategy == 2) {
1437  b_frames = estimate_best_b_count(s);
1438  if (b_frames < 0)
1439  return b_frames;
1440  }
1441 
1442  emms_c();
1443 
1444  for (i = b_frames - 1; i >= 0; i--) {
1445  int type = s->input_picture[i]->f->pict_type;
1446  if (type && type != AV_PICTURE_TYPE_B)
1447  b_frames = i;
1448  }
1449  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1450  b_frames == s->max_b_frames) {
1451  av_log(s->avctx, AV_LOG_ERROR,
1452  "warning, too many B-frames in a row\n");
1453  }
1454 
1455  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1456  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1457  s->gop_size > s->picture_in_gop_number) {
1458  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1459  } else {
1460  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1461  b_frames = 0;
1462  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1463  }
1464  }
1465 
1466  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1467  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1468  b_frames--;
1469 
1470  s->reordered_input_picture[0] = s->input_picture[b_frames];
1471  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1472  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1473  s->reordered_input_picture[0]->f->coded_picture_number =
1474  s->coded_picture_number++;
1475  for (i = 0; i < b_frames; i++) {
1476  s->reordered_input_picture[i + 1] = s->input_picture[i];
1477  s->reordered_input_picture[i + 1]->f->pict_type =
1479  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1480  s->coded_picture_number++;
1481  }
1482  }
1483  }
1484 no_output_pic:
1485  av_frame_unref(s->new_picture);
1486 
1487  if (s->reordered_input_picture[0]) {
1488  s->reordered_input_picture[0]->reference =
1489  s->reordered_input_picture[0]->f->pict_type !=
1490  AV_PICTURE_TYPE_B ? 3 : 0;
1491 
1492  if ((ret = av_frame_ref(s->new_picture,
1493  s->reordered_input_picture[0]->f)))
1494  return ret;
1495 
1496  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1497  // input is a shared pix, so we can't modify it -> allocate a new
1498  // one & ensure that the shared one is reuseable
1499 
1500  Picture *pic;
1501  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1502  if (i < 0)
1503  return i;
1504  pic = &s->picture[i];
1505 
1506  pic->reference = s->reordered_input_picture[0]->reference;
1507  if (alloc_picture(s, pic, 0) < 0) {
1508  return -1;
1509  }
1510 
1511  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1512  if (ret < 0)
1513  return ret;
1514 
1515  /* mark us unused / free shared pic */
1516  av_frame_unref(s->reordered_input_picture[0]->f);
1517  s->reordered_input_picture[0]->shared = 0;
1518 
1519  s->current_picture_ptr = pic;
1520  } else {
1521  // input is not a shared pix -> reuse buffer for current_pix
1522  s->current_picture_ptr = s->reordered_input_picture[0];
1523  for (i = 0; i < 4; i++) {
1524  if (s->new_picture->data[i])
1525  s->new_picture->data[i] += INPLACE_OFFSET;
1526  }
1527  }
1528  s->picture_number = s->new_picture->display_picture_number;
1529  }
1530  return 0;
1531 }
1532 
1534 {
1535  if (s->unrestricted_mv &&
1536  s->current_picture.reference &&
1537  !s->intra_only) {
1538  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1539  int hshift = desc->log2_chroma_w;
1540  int vshift = desc->log2_chroma_h;
1541  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1542  s->current_picture.f->linesize[0],
1543  s->h_edge_pos, s->v_edge_pos,
1545  EDGE_TOP | EDGE_BOTTOM);
1546  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1547  s->current_picture.f->linesize[1],
1548  s->h_edge_pos >> hshift,
1549  s->v_edge_pos >> vshift,
1550  EDGE_WIDTH >> hshift,
1551  EDGE_WIDTH >> vshift,
1552  EDGE_TOP | EDGE_BOTTOM);
1553  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1554  s->current_picture.f->linesize[2],
1555  s->h_edge_pos >> hshift,
1556  s->v_edge_pos >> vshift,
1557  EDGE_WIDTH >> hshift,
1558  EDGE_WIDTH >> vshift,
1559  EDGE_TOP | EDGE_BOTTOM);
1560  }
1561 
1562  emms_c();
1563 
1564  s->last_pict_type = s->pict_type;
1565  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1566  if (s->pict_type!= AV_PICTURE_TYPE_B)
1567  s->last_non_b_pict_type = s->pict_type;
1568 }
1569 
1571 {
1572  int intra, i;
1573 
1574  for (intra = 0; intra < 2; intra++) {
1575  if (s->dct_count[intra] > (1 << 16)) {
1576  for (i = 0; i < 64; i++) {
1577  s->dct_error_sum[intra][i] >>= 1;
1578  }
1579  s->dct_count[intra] >>= 1;
1580  }
1581 
1582  for (i = 0; i < 64; i++) {
1583  s->dct_offset[intra][i] = (s->noise_reduction *
1584  s->dct_count[intra] +
1585  s->dct_error_sum[intra][i] / 2) /
1586  (s->dct_error_sum[intra][i] + 1);
1587  }
1588  }
1589 }
1590 
1592 {
1593  int ret;
1594 
1595  /* mark & release old frames */
1596  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1597  s->last_picture_ptr != s->next_picture_ptr &&
1598  s->last_picture_ptr->f->buf[0]) {
1599  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1600  }
1601 
1602  s->current_picture_ptr->f->pict_type = s->pict_type;
1603  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1604 
1605  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1606  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1607  s->current_picture_ptr)) < 0)
1608  return ret;
1609 
1610  if (s->pict_type != AV_PICTURE_TYPE_B) {
1611  s->last_picture_ptr = s->next_picture_ptr;
1612  s->next_picture_ptr = s->current_picture_ptr;
1613  }
1614 
1615  if (s->last_picture_ptr) {
1616  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1617  if (s->last_picture_ptr->f->buf[0] &&
1618  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1619  s->last_picture_ptr)) < 0)
1620  return ret;
1621  }
1622  if (s->next_picture_ptr) {
1623  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1624  if (s->next_picture_ptr->f->buf[0] &&
1625  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1626  s->next_picture_ptr)) < 0)
1627  return ret;
1628  }
1629 
1630  if (s->picture_structure!= PICT_FRAME) {
1631  int i;
1632  for (i = 0; i < 4; i++) {
1633  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1634  s->current_picture.f->data[i] +=
1635  s->current_picture.f->linesize[i];
1636  }
1637  s->current_picture.f->linesize[i] *= 2;
1638  s->last_picture.f->linesize[i] *= 2;
1639  s->next_picture.f->linesize[i] *= 2;
1640  }
1641  }
1642 
1643  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1644  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1645  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1646  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1647  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1648  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1649  } else {
1650  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1651  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1652  }
1653 
1654  if (s->dct_error_sum) {
1655  av_assert2(s->noise_reduction && s->encoding);
1657  }
1658 
1659  return 0;
1660 }
1661 
1663  const AVFrame *pic_arg, int *got_packet)
1664 {
1666  int i, stuffing_count, ret;
1667  int context_count = s->slice_context_count;
1668 
1669  s->vbv_ignore_qmax = 0;
1670 
1671  s->picture_in_gop_number++;
1672 
1673  if (load_input_picture(s, pic_arg) < 0)
1674  return -1;
1675 
1676  if (select_input_picture(s) < 0) {
1677  return -1;
1678  }
1679 
1680  /* output? */
1681  if (s->new_picture->data[0]) {
1682  int growing_buffer = context_count == 1 && !s->data_partitioning;
1683  size_t pkt_size = 10000 + s->mb_width * s->mb_height *
1684  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1685  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1686  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_picture, &pkt_size);
1687  if (ret < 0)
1688  return ret;
1689  }
1690  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1691  return ret;
1693  if (s->mb_info) {
1694  s->mb_info_ptr = av_packet_new_side_data(pkt,
1696  s->mb_width*s->mb_height*12);
1697  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1698  }
1699 
1700  for (i = 0; i < context_count; i++) {
1701  int start_y = s->thread_context[i]->start_mb_y;
1702  int end_y = s->thread_context[i]-> end_mb_y;
1703  int h = s->mb_height;
1704  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1705  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1706 
1707  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1708  }
1709 
1710  s->pict_type = s->new_picture->pict_type;
1711  //emms_c();
1712  ret = frame_start(s);
1713  if (ret < 0)
1714  return ret;
1715 vbv_retry:
1716  ret = encode_picture(s, s->picture_number);
1717  if (growing_buffer) {
1718  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1719  pkt->data = s->pb.buf;
1721  }
1722  if (ret < 0)
1723  return -1;
1724 
1725  frame_end(s);
1726 
1727  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1728  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1729 
1730  if (avctx->rc_buffer_size) {
1731  RateControlContext *rcc = &s->rc_context;
1732  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1733  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1734  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1735 
1736  if (put_bits_count(&s->pb) > max_size &&
1737  s->lambda < s->lmax) {
1738  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1739  (s->qscale + 1) / s->qscale);
1740  if (s->adaptive_quant) {
1741  int i;
1742  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1743  s->lambda_table[i] =
1744  FFMAX(s->lambda_table[i] + min_step,
1745  s->lambda_table[i] * (s->qscale + 1) /
1746  s->qscale);
1747  }
1748  s->mb_skipped = 0; // done in frame_start()
1749  // done in encode_picture() so we must undo it
1750  if (s->pict_type == AV_PICTURE_TYPE_P) {
1751  if (s->flipflop_rounding ||
1752  s->codec_id == AV_CODEC_ID_H263P ||
1753  s->codec_id == AV_CODEC_ID_MPEG4)
1754  s->no_rounding ^= 1;
1755  }
1756  if (s->pict_type != AV_PICTURE_TYPE_B) {
1757  s->time_base = s->last_time_base;
1758  s->last_non_b_time = s->time - s->pp_time;
1759  }
1760  for (i = 0; i < context_count; i++) {
1761  PutBitContext *pb = &s->thread_context[i]->pb;
1762  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1763  }
1764  s->vbv_ignore_qmax = 1;
1765  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1766  goto vbv_retry;
1767  }
1768 
1770  }
1771 
1774 
1775  for (i = 0; i < 4; i++) {
1776  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1777  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1778  }
1779  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1780  s->current_picture_ptr->encoding_error,
1782  s->pict_type);
1783 
1785  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1786  s->misc_bits + s->i_tex_bits +
1787  s->p_tex_bits);
1788  flush_put_bits(&s->pb);
1789  s->frame_bits = put_bits_count(&s->pb);
1790 
1791  stuffing_count = ff_vbv_update(s, s->frame_bits);
1792  s->stuffing_bits = 8*stuffing_count;
1793  if (stuffing_count) {
1794  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1795  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1796  return -1;
1797  }
1798 
1799  switch (s->codec_id) {
1802  while (stuffing_count--) {
1803  put_bits(&s->pb, 8, 0);
1804  }
1805  break;
1806  case AV_CODEC_ID_MPEG4:
1807  put_bits(&s->pb, 16, 0);
1808  put_bits(&s->pb, 16, 0x1C3);
1809  stuffing_count -= 4;
1810  while (stuffing_count--) {
1811  put_bits(&s->pb, 8, 0xFF);
1812  }
1813  break;
1814  default:
1815  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1816  s->stuffing_bits = 0;
1817  }
1818  flush_put_bits(&s->pb);
1819  s->frame_bits = put_bits_count(&s->pb);
1820  }
1821 
1822  /* update MPEG-1/2 vbv_delay for CBR */
1823  if (avctx->rc_max_rate &&
1825  s->out_format == FMT_MPEG1 &&
1826  90000LL * (avctx->rc_buffer_size - 1) <=
1827  avctx->rc_max_rate * 0xFFFFLL) {
1828  AVCPBProperties *props;
1829  size_t props_size;
1830 
1831  int vbv_delay, min_delay;
1832  double inbits = avctx->rc_max_rate *
1834  int minbits = s->frame_bits - 8 *
1835  (s->vbv_delay_pos - 1);
1836  double bits = s->rc_context.buffer_index + minbits - inbits;
1837  uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;
1838 
1839  if (bits < 0)
1841  "Internal error, negative bits\n");
1842 
1843  av_assert1(s->repeat_first_field == 0);
1844 
1845  vbv_delay = bits * 90000 / avctx->rc_max_rate;
1846  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
1847  avctx->rc_max_rate;
1848 
1849  vbv_delay = FFMAX(vbv_delay, min_delay);
1850 
1851  av_assert0(vbv_delay < 0xFFFF);
1852 
1853  vbv_delay_ptr[0] &= 0xF8;
1854  vbv_delay_ptr[0] |= vbv_delay >> 13;
1855  vbv_delay_ptr[1] = vbv_delay >> 5;
1856  vbv_delay_ptr[2] &= 0x07;
1857  vbv_delay_ptr[2] |= vbv_delay << 3;
1858 
1859  props = av_cpb_properties_alloc(&props_size);
1860  if (!props)
1861  return AVERROR(ENOMEM);
1862  props->vbv_delay = vbv_delay * 300;
1863 
1865  (uint8_t*)props, props_size);
1866  if (ret < 0) {
1867  av_freep(&props);
1868  return ret;
1869  }
1870  }
1871  s->total_bits += s->frame_bits;
1872 
1873  pkt->pts = s->current_picture.f->pts;
1874  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1875  if (!s->current_picture.f->coded_picture_number)
1876  pkt->dts = pkt->pts - s->dts_delta;
1877  else
1878  pkt->dts = s->reordered_pts;
1879  s->reordered_pts = pkt->pts;
1880  } else
1881  pkt->dts = pkt->pts;
1882  if (s->current_picture.f->key_frame)
1884  if (s->mb_info)
1886  } else {
1887  s->frame_bits = 0;
1888  }
1889 
1890  /* release non-reference frames */
1891  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1892  if (!s->picture[i].reference)
1893  ff_mpeg_unref_picture(avctx, &s->picture[i]);
1894  }
1895 
1896  av_assert1((s->frame_bits & 7) == 0);
1897 
1898  pkt->size = s->frame_bits / 8;
1899  *got_packet = !!pkt->size;
1900  return 0;
1901 }
1902 
1904  int n, int threshold)
1905 {
1906  static const char tab[64] = {
1907  3, 2, 2, 1, 1, 1, 1, 1,
1908  1, 1, 1, 1, 1, 1, 1, 1,
1909  1, 1, 1, 1, 1, 1, 1, 1,
1910  0, 0, 0, 0, 0, 0, 0, 0,
1911  0, 0, 0, 0, 0, 0, 0, 0,
1912  0, 0, 0, 0, 0, 0, 0, 0,
1913  0, 0, 0, 0, 0, 0, 0, 0,
1914  0, 0, 0, 0, 0, 0, 0, 0
1915  };
1916  int score = 0;
1917  int run = 0;
1918  int i;
1919  int16_t *block = s->block[n];
1920  const int last_index = s->block_last_index[n];
1921  int skip_dc;
1922 
1923  if (threshold < 0) {
1924  skip_dc = 0;
1925  threshold = -threshold;
1926  } else
1927  skip_dc = 1;
1928 
1929  /* Are all we could set to zero already zero? */
1930  if (last_index <= skip_dc - 1)
1931  return;
1932 
1933  for (i = 0; i <= last_index; i++) {
1934  const int j = s->intra_scantable.permutated[i];
1935  const int level = FFABS(block[j]);
1936  if (level == 1) {
1937  if (skip_dc && i == 0)
1938  continue;
1939  score += tab[run];
1940  run = 0;
1941  } else if (level > 1) {
1942  return;
1943  } else {
1944  run++;
1945  }
1946  }
1947  if (score >= threshold)
1948  return;
1949  for (i = skip_dc; i <= last_index; i++) {
1950  const int j = s->intra_scantable.permutated[i];
1951  block[j] = 0;
1952  }
1953  if (block[0])
1954  s->block_last_index[n] = 0;
1955  else
1956  s->block_last_index[n] = -1;
1957 }
1958 
1959 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1960  int last_index)
1961 {
1962  int i;
1963  const int maxlevel = s->max_qcoeff;
1964  const int minlevel = s->min_qcoeff;
1965  int overflow = 0;
1966 
1967  if (s->mb_intra) {
1968  i = 1; // skip clipping of intra dc
1969  } else
1970  i = 0;
1971 
1972  for (; i <= last_index; i++) {
1973  const int j = s->intra_scantable.permutated[i];
1974  int level = block[j];
1975 
1976  if (level > maxlevel) {
1977  level = maxlevel;
1978  overflow++;
1979  } else if (level < minlevel) {
1980  level = minlevel;
1981  overflow++;
1982  }
1983 
1984  block[j] = level;
1985  }
1986 
1987  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1988  av_log(s->avctx, AV_LOG_INFO,
1989  "warning, clipping %d dct coefficients to %d..%d\n",
1990  overflow, minlevel, maxlevel);
1991 }
1992 
/**
 * Derive a perceptual weight for each coefficient position of an 8x8 block:
 * the weight is proportional to the standard deviation of the pixel's
 * clamped 3x3 neighbourhood (flat areas get low weights).
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            /* 3x3 neighbourhood clamped to the 8x8 block borders */
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int x2, y2;

            for (y2 = y_lo; y2 < y_hi; y2++) {
                for (x2 = x_lo; x2 < x_hi; x2++) {
                    const int v = ptr[x2 + y2 * stride];
                    sum   += v;
                    sqr   += v * v;
                    count++;
                }
            }
            /* count*variance = count*sqr - sum^2; 36/count scales to weight */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2016 
2018  int motion_x, int motion_y,
2019  int mb_block_height,
2020  int mb_block_width,
2021  int mb_block_count,
2022  int chroma_x_shift,
2023  int chroma_y_shift,
2024  int chroma_format)
2025 {
2026 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2027  * and neither of these encoders currently supports 444. */
2028 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2029  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2030  int16_t weight[12][64];
2031  int16_t orig[12][64];
2032  const int mb_x = s->mb_x;
2033  const int mb_y = s->mb_y;
2034  int i;
2035  int skip_dct[12];
2036  int dct_offset = s->linesize * 8; // default for progressive frames
2037  int uv_dct_offset = s->uvlinesize * 8;
2038  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2039  ptrdiff_t wrap_y, wrap_c;
2040 
2041  for (i = 0; i < mb_block_count; i++)
2042  skip_dct[i] = s->skipdct;
2043 
2044  if (s->adaptive_quant) {
2045  const int last_qp = s->qscale;
2046  const int mb_xy = mb_x + mb_y * s->mb_stride;
2047 
2048  s->lambda = s->lambda_table[mb_xy];
2049  update_qscale(s);
2050 
2051  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2052  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2053  s->dquant = s->qscale - last_qp;
2054 
2055  if (s->out_format == FMT_H263) {
2056  s->dquant = av_clip(s->dquant, -2, 2);
2057 
2058  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2059  if (!s->mb_intra) {
2060  if (s->pict_type == AV_PICTURE_TYPE_B) {
2061  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2062  s->dquant = 0;
2063  }
2064  if (s->mv_type == MV_TYPE_8X8)
2065  s->dquant = 0;
2066  }
2067  }
2068  }
2069  }
2070  ff_set_qscale(s, last_qp + s->dquant);
2071  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2072  ff_set_qscale(s, s->qscale + s->dquant);
2073 
2074  wrap_y = s->linesize;
2075  wrap_c = s->uvlinesize;
2076  ptr_y = s->new_picture->data[0] +
2077  (mb_y * 16 * wrap_y) + mb_x * 16;
2078  ptr_cb = s->new_picture->data[1] +
2079  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2080  ptr_cr = s->new_picture->data[2] +
2081  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2082 
2083  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2084  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2085  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2086  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2087  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2088  wrap_y, wrap_y,
2089  16, 16, mb_x * 16, mb_y * 16,
2090  s->width, s->height);
2091  ptr_y = ebuf;
2092  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2093  wrap_c, wrap_c,
2094  mb_block_width, mb_block_height,
2095  mb_x * mb_block_width, mb_y * mb_block_height,
2096  cw, ch);
2097  ptr_cb = ebuf + 16 * wrap_y;
2098  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2099  wrap_c, wrap_c,
2100  mb_block_width, mb_block_height,
2101  mb_x * mb_block_width, mb_y * mb_block_height,
2102  cw, ch);
2103  ptr_cr = ebuf + 16 * wrap_y + 16;
2104  }
2105 
2106  if (s->mb_intra) {
2107  if (INTERLACED_DCT(s)) {
2108  int progressive_score, interlaced_score;
2109 
2110  s->interlaced_dct = 0;
2111  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2112  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2113  NULL, wrap_y, 8) - 400;
2114 
2115  if (progressive_score > 0) {
2116  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2117  NULL, wrap_y * 2, 8) +
2118  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2119  NULL, wrap_y * 2, 8);
2120  if (progressive_score > interlaced_score) {
2121  s->interlaced_dct = 1;
2122 
2123  dct_offset = wrap_y;
2124  uv_dct_offset = wrap_c;
2125  wrap_y <<= 1;
2126  if (chroma_format == CHROMA_422 ||
2128  wrap_c <<= 1;
2129  }
2130  }
2131  }
2132 
2133  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2134  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2135  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2136  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2137 
2138  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2139  skip_dct[4] = 1;
2140  skip_dct[5] = 1;
2141  } else {
2142  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2143  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2144  if (chroma_format == CHROMA_422) {
2145  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2146  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2147  } else if (chroma_format == CHROMA_444) {
2148  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2149  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2150  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2151  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2152  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2153  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2154  }
2155  }
2156  } else {
2157  op_pixels_func (*op_pix)[4];
2158  qpel_mc_func (*op_qpix)[16];
2159  uint8_t *dest_y, *dest_cb, *dest_cr;
2160 
2161  dest_y = s->dest[0];
2162  dest_cb = s->dest[1];
2163  dest_cr = s->dest[2];
2164 
2165  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2166  op_pix = s->hdsp.put_pixels_tab;
2167  op_qpix = s->qdsp.put_qpel_pixels_tab;
2168  } else {
2169  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2170  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2171  }
2172 
2173  if (s->mv_dir & MV_DIR_FORWARD) {
2174  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2175  s->last_picture.f->data,
2176  op_pix, op_qpix);
2177  op_pix = s->hdsp.avg_pixels_tab;
2178  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2179  }
2180  if (s->mv_dir & MV_DIR_BACKWARD) {
2181  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2182  s->next_picture.f->data,
2183  op_pix, op_qpix);
2184  }
2185 
2186  if (INTERLACED_DCT(s)) {
2187  int progressive_score, interlaced_score;
2188 
2189  s->interlaced_dct = 0;
2190  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2191  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2192  ptr_y + wrap_y * 8,
2193  wrap_y, 8) - 400;
2194 
2195  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2196  progressive_score -= 400;
2197 
2198  if (progressive_score > 0) {
2199  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2200  wrap_y * 2, 8) +
2201  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2202  ptr_y + wrap_y,
2203  wrap_y * 2, 8);
2204 
2205  if (progressive_score > interlaced_score) {
2206  s->interlaced_dct = 1;
2207 
2208  dct_offset = wrap_y;
2209  uv_dct_offset = wrap_c;
2210  wrap_y <<= 1;
2211  if (chroma_format == CHROMA_422)
2212  wrap_c <<= 1;
2213  }
2214  }
2215  }
2216 
2217  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2218  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2219  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2220  dest_y + dct_offset, wrap_y);
2221  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2222  dest_y + dct_offset + 8, wrap_y);
2223 
2224  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2225  skip_dct[4] = 1;
2226  skip_dct[5] = 1;
2227  } else {
2228  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2229  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2230  if (!chroma_y_shift) { /* 422 */
2231  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2232  dest_cb + uv_dct_offset, wrap_c);
2233  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2234  dest_cr + uv_dct_offset, wrap_c);
2235  }
2236  }
2237  /* pre quantization */
2238  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2239  2 * s->qscale * s->qscale) {
2240  // FIXME optimize
2241  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2242  skip_dct[0] = 1;
2243  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2244  skip_dct[1] = 1;
2245  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2246  wrap_y, 8) < 20 * s->qscale)
2247  skip_dct[2] = 1;
2248  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2249  wrap_y, 8) < 20 * s->qscale)
2250  skip_dct[3] = 1;
2251  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2252  skip_dct[4] = 1;
2253  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2254  skip_dct[5] = 1;
2255  if (!chroma_y_shift) { /* 422 */
2256  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2257  dest_cb + uv_dct_offset,
2258  wrap_c, 8) < 20 * s->qscale)
2259  skip_dct[6] = 1;
2260  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2261  dest_cr + uv_dct_offset,
2262  wrap_c, 8) < 20 * s->qscale)
2263  skip_dct[7] = 1;
2264  }
2265  }
2266  }
2267 
2268  if (s->quantizer_noise_shaping) {
2269  if (!skip_dct[0])
2270  get_visual_weight(weight[0], ptr_y , wrap_y);
2271  if (!skip_dct[1])
2272  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2273  if (!skip_dct[2])
2274  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2275  if (!skip_dct[3])
2276  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2277  if (!skip_dct[4])
2278  get_visual_weight(weight[4], ptr_cb , wrap_c);
2279  if (!skip_dct[5])
2280  get_visual_weight(weight[5], ptr_cr , wrap_c);
2281  if (!chroma_y_shift) { /* 422 */
2282  if (!skip_dct[6])
2283  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2284  wrap_c);
2285  if (!skip_dct[7])
2286  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2287  wrap_c);
2288  }
2289  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2290  }
2291 
2292  /* DCT & quantize */
2293  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2294  {
2295  for (i = 0; i < mb_block_count; i++) {
2296  if (!skip_dct[i]) {
2297  int overflow;
2298  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2299  // FIXME we could decide to change to quantizer instead of
2300  // clipping
2301  // JS: I don't think that would be a good idea it could lower
2302  // quality instead of improve it. Just INTRADC clipping
2303  // deserves changes in quantizer
2304  if (overflow)
2305  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2306  } else
2307  s->block_last_index[i] = -1;
2308  }
2309  if (s->quantizer_noise_shaping) {
2310  for (i = 0; i < mb_block_count; i++) {
2311  if (!skip_dct[i]) {
2312  s->block_last_index[i] =
2313  dct_quantize_refine(s, s->block[i], weight[i],
2314  orig[i], i, s->qscale);
2315  }
2316  }
2317  }
2318 
2319  if (s->luma_elim_threshold && !s->mb_intra)
2320  for (i = 0; i < 4; i++)
2321  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2322  if (s->chroma_elim_threshold && !s->mb_intra)
2323  for (i = 4; i < mb_block_count; i++)
2324  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2325 
2326  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2327  for (i = 0; i < mb_block_count; i++) {
2328  if (s->block_last_index[i] == -1)
2329  s->coded_score[i] = INT_MAX / 256;
2330  }
2331  }
2332  }
2333 
2334  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2335  s->block_last_index[4] =
2336  s->block_last_index[5] = 0;
2337  s->block[4][0] =
2338  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2339  if (!chroma_y_shift) { /* 422 / 444 */
2340  for (i=6; i<12; i++) {
2341  s->block_last_index[i] = 0;
2342  s->block[i][0] = s->block[4][0];
2343  }
2344  }
2345  }
2346 
2347  // non c quantize code returns incorrect block_last_index FIXME
2348  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2349  for (i = 0; i < mb_block_count; i++) {
2350  int j;
2351  if (s->block_last_index[i] > 0) {
2352  for (j = 63; j > 0; j--) {
2353  if (s->block[i][s->intra_scantable.permutated[j]])
2354  break;
2355  }
2356  s->block_last_index[i] = j;
2357  }
2358  }
2359  }
2360 
2361  /* huffman encode */
2362  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2365  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2366  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2367  break;
2368  case AV_CODEC_ID_MPEG4:
2369  if (CONFIG_MPEG4_ENCODER)
2370  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2371  break;
2372  case AV_CODEC_ID_MSMPEG4V2:
2373  case AV_CODEC_ID_MSMPEG4V3:
2374  case AV_CODEC_ID_WMV1:
2376  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2377  break;
2378  case AV_CODEC_ID_WMV2:
2379  if (CONFIG_WMV2_ENCODER)
2380  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2381  break;
2382  case AV_CODEC_ID_H261:
2383  if (CONFIG_H261_ENCODER)
2384  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2385  break;
2386  case AV_CODEC_ID_H263:
2387  case AV_CODEC_ID_H263P:
2388  case AV_CODEC_ID_FLV1:
2389  case AV_CODEC_ID_RV10:
2390  case AV_CODEC_ID_RV20:
2391  if (CONFIG_H263_ENCODER)
2392  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2393  break;
2394 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2395  case AV_CODEC_ID_MJPEG:
2396  case AV_CODEC_ID_AMV:
2397  ff_mjpeg_encode_mb(s, s->block);
2398  break;
2399 #endif
2400  case AV_CODEC_ID_SPEEDHQ:
2401  if (CONFIG_SPEEDHQ_ENCODER)
2402  ff_speedhq_encode_mb(s, s->block);
2403  break;
2404  default:
2405  av_assert1(0);
2406  }
2407 }
2408 
2409 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2410 {
2411  if (s->chroma_format == CHROMA_420)
2412  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2413  else if (s->chroma_format == CHROMA_422)
2414  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2415  else
2416  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2417 }
2418 
2420  const MpegEncContext *s)
2421 {
2422  int i;
2423 
2424  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2425 
2426  /* MPEG-1 */
2427  d->mb_skip_run= s->mb_skip_run;
2428  for(i=0; i<3; i++)
2429  d->last_dc[i] = s->last_dc[i];
2430 
2431  /* statistics */
2432  d->mv_bits= s->mv_bits;
2433  d->i_tex_bits= s->i_tex_bits;
2434  d->p_tex_bits= s->p_tex_bits;
2435  d->i_count= s->i_count;
2436  d->skip_count= s->skip_count;
2437  d->misc_bits= s->misc_bits;
2438  d->last_bits= 0;
2439 
2440  d->mb_skipped= 0;
2441  d->qscale= s->qscale;
2442  d->dquant= s->dquant;
2443 
2444  d->esc3_level_length= s->esc3_level_length;
2445 }
2446 
2448  const MpegEncContext *s)
2449 {
2450  int i;
2451 
2452  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2453  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2454 
2455  /* MPEG-1 */
2456  d->mb_skip_run= s->mb_skip_run;
2457  for(i=0; i<3; i++)
2458  d->last_dc[i] = s->last_dc[i];
2459 
2460  /* statistics */
2461  d->mv_bits= s->mv_bits;
2462  d->i_tex_bits= s->i_tex_bits;
2463  d->p_tex_bits= s->p_tex_bits;
2464  d->i_count= s->i_count;
2465  d->skip_count= s->skip_count;
2466  d->misc_bits= s->misc_bits;
2467 
2468  d->mb_intra= s->mb_intra;
2469  d->mb_skipped= s->mb_skipped;
2470  d->mv_type= s->mv_type;
2471  d->mv_dir= s->mv_dir;
2472  d->pb= s->pb;
2473  if(s->data_partitioning){
2474  d->pb2= s->pb2;
2475  d->tex_pb= s->tex_pb;
2476  }
2477  d->block= s->block;
2478  for(i=0; i<8; i++)
2479  d->block_last_index[i]= s->block_last_index[i];
2480  d->interlaced_dct= s->interlaced_dct;
2481  d->qscale= s->qscale;
2482 
2483  d->esc3_level_length= s->esc3_level_length;
2484 }
2485 
2486 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best,
2488  int *dmin, int *next_block, int motion_x, int motion_y)
2489 {
2490  int score;
2491  uint8_t *dest_backup[3];
2492 
2493  copy_context_before_encode(s, backup);
2494 
2495  s->block= s->blocks[*next_block];
2496  s->pb= pb[*next_block];
2497  if(s->data_partitioning){
2498  s->pb2 = pb2 [*next_block];
2499  s->tex_pb= tex_pb[*next_block];
2500  }
2501 
2502  if(*next_block){
2503  memcpy(dest_backup, s->dest, sizeof(s->dest));
2504  s->dest[0] = s->sc.rd_scratchpad;
2505  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2506  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2507  av_assert0(s->linesize >= 32); //FIXME
2508  }
2509 
2510  encode_mb(s, motion_x, motion_y);
2511 
2512  score= put_bits_count(&s->pb);
2513  if(s->data_partitioning){
2514  score+= put_bits_count(&s->pb2);
2515  score+= put_bits_count(&s->tex_pb);
2516  }
2517 
2518  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2519  ff_mpv_reconstruct_mb(s, s->block);
2520 
2521  score *= s->lambda2;
2522  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2523  }
2524 
2525  if(*next_block){
2526  memcpy(s->dest, dest_backup, sizeof(s->dest));
2527  }
2528 
2529  if(score<*dmin){
2530  *dmin= score;
2531  *next_block^=1;
2532 
2534  }
2535 }
2536 
2537 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2538  const uint32_t *sq = ff_square_tab + 256;
2539  int acc=0;
2540  int x,y;
2541 
2542  if(w==16 && h==16)
2543  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2544  else if(w==8 && h==8)
2545  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2546 
2547  for(y=0; y<h; y++){
2548  for(x=0; x<w; x++){
2549  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2550  }
2551  }
2552 
2553  av_assert2(acc>=0);
2554 
2555  return acc;
2556 }
2557 
/**
 * Compute the reconstruction error of the current macroblock against the
 * source frame (s->new_picture) over all three planes, using either NSSE
 * (if mb_cmp selects it) or plain SSE.  Partial macroblocks at the right or
 * bottom picture border fall back to the generic sse() helper.
 */
static int sse_mb(MpegEncContext *s){
    int w= 16;
    int h= 16;
    /* Chroma block dimensions implied by the subsampling shifts. */
    int chroma_mb_w = w >> s->chroma_x_shift;
    int chroma_mb_h = h >> s->chroma_y_shift;

    /* Clip the luma block size at the picture border. */
    if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
    if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

    if(w==16 && h==16)
      /* Full macroblock: use the optimized comparison functions. */
      if(s->avctx->mb_cmp == FF_CMP_NSSE){
        return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
                               s->dest[0], s->linesize, 16) +
               s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                               s->dest[1], s->uvlinesize, chroma_mb_h) +
               s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                               s->dest[2], s->uvlinesize, chroma_mb_h);
      }else{
        return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
                              s->dest[0], s->linesize, 16) +
               s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                              s->dest[1], s->uvlinesize, chroma_mb_h) +
               s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                              s->dest[2], s->uvlinesize, chroma_mb_h);
      }
    else
        /* Border macroblock: generic SSE on the clipped rectangle. */
        return  sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
                    s->dest[0], w, h, s->linesize) +
                sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                    s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
                sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
                    s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
}
2591 
2593  MpegEncContext *s= *(void**)arg;
2594 
2595 
2596  s->me.pre_pass=1;
2597  s->me.dia_size= s->avctx->pre_dia_size;
2598  s->first_slice_line=1;
2599  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2600  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2601  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2602  }
2603  s->first_slice_line=0;
2604  }
2605 
2606  s->me.pre_pass=0;
2607 
2608  return 0;
2609 }
2610 
2612  MpegEncContext *s= *(void**)arg;
2613 
2614  s->me.dia_size= s->avctx->dia_size;
2615  s->first_slice_line=1;
2616  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2617  s->mb_x=0; //for block init below
2619  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2620  s->block_index[0]+=2;
2621  s->block_index[1]+=2;
2622  s->block_index[2]+=2;
2623  s->block_index[3]+=2;
2624 
2625  /* compute motion vector & mb_type and store in context */
2626  if(s->pict_type==AV_PICTURE_TYPE_B)
2627  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2628  else
2629  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2630  }
2631  s->first_slice_line=0;
2632  }
2633  return 0;
2634 }
2635 
2636 static int mb_var_thread(AVCodecContext *c, void *arg){
2637  MpegEncContext *s= *(void**)arg;
2638  int mb_x, mb_y;
2639 
2640  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2641  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2642  int xx = mb_x * 16;
2643  int yy = mb_y * 16;
2644  uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
2645  int varc;
2646  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2647 
2648  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2649  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2650 
2651  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2652  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2653  s->me.mb_var_sum_temp += varc;
2654  }
2655  }
2656  return 0;
2657 }
2658 
2660  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2661  if(s->partitioned_frame){
2663  }
2664 
2665  ff_mpeg4_stuffing(&s->pb);
2666  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2667  s->out_format == FMT_MJPEG) {
2669  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2671  }
2672 
2673  flush_put_bits(&s->pb);
2674 
2675  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2676  s->misc_bits+= get_bits_diff(s);
2677 }
2678 
2680 {
2681  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2682  int offset = put_bits_count(&s->pb);
2683  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2684  int gobn = s->mb_y / s->gob_index;
2685  int pred_x, pred_y;
2686  if (CONFIG_H263_ENCODER)
2687  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2688  bytestream_put_le32(&ptr, offset);
2689  bytestream_put_byte(&ptr, s->qscale);
2690  bytestream_put_byte(&ptr, gobn);
2691  bytestream_put_le16(&ptr, mba);
2692  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2693  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2694  /* 4MV not implemented */
2695  bytestream_put_byte(&ptr, 0); /* hmv2 */
2696  bytestream_put_byte(&ptr, 0); /* vmv2 */
2697 }
2698 
/**
 * Maintain the macroblock-info side-data records: reserve a new 12-byte
 * slot whenever at least s->mb_info bytes of bitstream have been produced
 * since the previous record, and fill the slot via write_mb_info().
 * Called with startcode=1 just before a start code is written (record the
 * position only) and with startcode=0 per macroblock (write the record).
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* Enough payload since the last record: reserve the next 12-byte slot. */
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bytes_count(&s->pb, 0);
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in that
         * case, this function will be called again (with startcode=0) after
         * the start code has been written, and the mb info is written then. */
        return;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2721 
/**
 * Grow the shared output bit buffer when fewer than @p threshold bytes are
 * left in the PutBitContext.
 *
 * Only reallocates when single-slice encoding is used and the PutBitContext
 * writes directly into avctx->internal->byte_buffer (otherwise the buffer is
 * not ours to grow).  On success the PutBitContext and ptr_lastgob are
 * rebased onto the new buffer.
 *
 * @param threshold     minimum number of bytes that must remain writable
 * @param size_increase number of bytes to grow the buffer by
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if the space is still insufficient afterwards
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (put_bytes_left(&s->pb, 0) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* Remember ptr_lastgob as an offset; the buffer address will change. */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        /* Guard against int overflow of the grown buffer size. */
        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        /* Preserve the bits already written, then swap in the new buffer. */
        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer      = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob   = s->pb.buf + lastgob_pos;
    }
    if (put_bytes_left(&s->pb, 0) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2755 
2756 static int encode_thread(AVCodecContext *c, void *arg){
2757  MpegEncContext *s= *(void**)arg;
2758  int mb_x, mb_y, mb_y_order;
2759  int chr_h= 16>>s->chroma_y_shift;
2760  int i, j;
2761  MpegEncContext best_s = { 0 }, backup_s;
2762  uint8_t bit_buf[2][MAX_MB_BYTES];
2763  uint8_t bit_buf2[2][MAX_MB_BYTES];
2764  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2765  PutBitContext pb[2], pb2[2], tex_pb[2];
2766 
2767  for(i=0; i<2; i++){
2768  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2769  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2770  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2771  }
2772 
2773  s->last_bits= put_bits_count(&s->pb);
2774  s->mv_bits=0;
2775  s->misc_bits=0;
2776  s->i_tex_bits=0;
2777  s->p_tex_bits=0;
2778  s->i_count=0;
2779  s->skip_count=0;
2780 
2781  for(i=0; i<3; i++){
2782  /* init last dc values */
2783  /* note: quant matrix value (8) is implied here */
2784  s->last_dc[i] = 128 << s->intra_dc_precision;
2785 
2786  s->current_picture.encoding_error[i] = 0;
2787  }
2788  if(s->codec_id==AV_CODEC_ID_AMV){
2789  s->last_dc[0] = 128*8/13;
2790  s->last_dc[1] = 128*8/14;
2791  s->last_dc[2] = 128*8/14;
2792  }
2793  s->mb_skip_run = 0;
2794  memset(s->last_mv, 0, sizeof(s->last_mv));
2795 
2796  s->last_mv_dir = 0;
2797 
2798  switch(s->codec_id){
2799  case AV_CODEC_ID_H263:
2800  case AV_CODEC_ID_H263P:
2801  case AV_CODEC_ID_FLV1:
2802  if (CONFIG_H263_ENCODER)
2803  s->gob_index = H263_GOB_HEIGHT(s->height);
2804  break;
2805  case AV_CODEC_ID_MPEG4:
2806  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2808  break;
2809  }
2810 
2811  s->resync_mb_x=0;
2812  s->resync_mb_y=0;
2813  s->first_slice_line = 1;
2814  s->ptr_lastgob = s->pb.buf;
2815  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2816  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2817  int first_in_slice;
2818  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2819  if (first_in_slice && mb_y_order != s->start_mb_y)
2821  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2822  } else {
2823  mb_y = mb_y_order;
2824  }
2825  s->mb_x=0;
2826  s->mb_y= mb_y;
2827 
2828  ff_set_qscale(s, s->qscale);
2830 
2831  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2832  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2833  int mb_type= s->mb_type[xy];
2834 // int d;
2835  int dmin= INT_MAX;
2836  int dir;
2837  int size_increase = s->avctx->internal->byte_buffer_size/4
2838  + s->mb_width*MAX_MB_BYTES;
2839 
2841  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2842  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2843  return -1;
2844  }
2845  if(s->data_partitioning){
2846  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2847  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2848  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2849  return -1;
2850  }
2851  }
2852 
2853  s->mb_x = mb_x;
2854  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2856 
2857  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2859  xy= s->mb_y*s->mb_stride + s->mb_x;
2860  mb_type= s->mb_type[xy];
2861  }
2862 
2863  /* write gob / video packet header */
2864  if(s->rtp_mode){
2865  int current_packet_size, is_gob_start;
2866 
2867  current_packet_size = put_bytes_count(&s->pb, 1)
2868  - (s->ptr_lastgob - s->pb.buf);
2869 
2870  is_gob_start = s->rtp_payload_size &&
2871  current_packet_size >= s->rtp_payload_size &&
2872  mb_y + mb_x > 0;
2873 
2874  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2875 
2876  switch(s->codec_id){
2877  case AV_CODEC_ID_H263:
2878  case AV_CODEC_ID_H263P:
2879  if(!s->h263_slice_structured)
2880  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2881  break;
2883  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2885  if(s->mb_skip_run) is_gob_start=0;
2886  break;
2887  case AV_CODEC_ID_MJPEG:
2888  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2889  break;
2890  }
2891 
2892  if(is_gob_start){
2893  if(s->start_mb_y != mb_y || mb_x!=0){
2894  write_slice_end(s);
2895 
2896  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2898  }
2899  }
2900 
2901  av_assert2((put_bits_count(&s->pb)&7) == 0);
2902  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2903 
2904  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2905  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
2906  int d = 100 / s->error_rate;
2907  if(r % d == 0){
2908  current_packet_size=0;
2909  s->pb.buf_ptr= s->ptr_lastgob;
2910  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2911  }
2912  }
2913 
2914  switch(s->codec_id){
2915  case AV_CODEC_ID_MPEG4:
2916  if (CONFIG_MPEG4_ENCODER) {
2919  }
2920  break;
2923  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2926  }
2927  break;
2928  case AV_CODEC_ID_H263:
2929  case AV_CODEC_ID_H263P:
2930  if (CONFIG_H263_ENCODER) {
2931  update_mb_info(s, 1);
2933  }
2934  break;
2935  }
2936 
2937  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2938  int bits= put_bits_count(&s->pb);
2939  s->misc_bits+= bits - s->last_bits;
2940  s->last_bits= bits;
2941  }
2942 
2943  s->ptr_lastgob += current_packet_size;
2944  s->first_slice_line=1;
2945  s->resync_mb_x=mb_x;
2946  s->resync_mb_y=mb_y;
2947  }
2948  }
2949 
2950  if( (s->resync_mb_x == s->mb_x)
2951  && s->resync_mb_y+1 == s->mb_y){
2952  s->first_slice_line=0;
2953  }
2954 
2955  s->mb_skipped=0;
2956  s->dquant=0; //only for QP_RD
2957 
2958  update_mb_info(s, 0);
2959 
2960  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2961  int next_block=0;
2962  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2963 
2964  copy_context_before_encode(&backup_s, s);
2965  backup_s.pb= s->pb;
2966  best_s.data_partitioning= s->data_partitioning;
2967  best_s.partitioned_frame= s->partitioned_frame;
2968  if(s->data_partitioning){
2969  backup_s.pb2= s->pb2;
2970  backup_s.tex_pb= s->tex_pb;
2971  }
2972 
2974  s->mv_dir = MV_DIR_FORWARD;
2975  s->mv_type = MV_TYPE_16X16;
2976  s->mb_intra= 0;
2977  s->mv[0][0][0] = s->p_mv_table[xy][0];
2978  s->mv[0][0][1] = s->p_mv_table[xy][1];
2979  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
2980  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2981  }
2983  s->mv_dir = MV_DIR_FORWARD;
2984  s->mv_type = MV_TYPE_FIELD;
2985  s->mb_intra= 0;
2986  for(i=0; i<2; i++){
2987  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2988  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2989  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2990  }
2991  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
2992  &dmin, &next_block, 0, 0);
2993  }
2995  s->mv_dir = MV_DIR_FORWARD;
2996  s->mv_type = MV_TYPE_16X16;
2997  s->mb_intra= 0;
2998  s->mv[0][0][0] = 0;
2999  s->mv[0][0][1] = 0;
3000  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3001  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3002  }
3004  s->mv_dir = MV_DIR_FORWARD;
3005  s->mv_type = MV_TYPE_8X8;
3006  s->mb_intra= 0;
3007  for(i=0; i<4; i++){
3008  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3009  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3010  }
3011  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3012  &dmin, &next_block, 0, 0);
3013  }
3015  s->mv_dir = MV_DIR_FORWARD;
3016  s->mv_type = MV_TYPE_16X16;
3017  s->mb_intra= 0;
3018  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3019  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3020  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3021  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3022  }
3024  s->mv_dir = MV_DIR_BACKWARD;
3025  s->mv_type = MV_TYPE_16X16;
3026  s->mb_intra= 0;
3027  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3028  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3029  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3030  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3031  }
3033  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3034  s->mv_type = MV_TYPE_16X16;
3035  s->mb_intra= 0;
3036  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3037  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3038  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3039  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3040  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3041  &dmin, &next_block, 0, 0);
3042  }
3044  s->mv_dir = MV_DIR_FORWARD;
3045  s->mv_type = MV_TYPE_FIELD;
3046  s->mb_intra= 0;
3047  for(i=0; i<2; i++){
3048  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3049  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3050  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3051  }
3052  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3053  &dmin, &next_block, 0, 0);
3054  }
3056  s->mv_dir = MV_DIR_BACKWARD;
3057  s->mv_type = MV_TYPE_FIELD;
3058  s->mb_intra= 0;
3059  for(i=0; i<2; i++){
3060  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3061  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3062  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3063  }
3064  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3065  &dmin, &next_block, 0, 0);
3066  }
3068  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3069  s->mv_type = MV_TYPE_FIELD;
3070  s->mb_intra= 0;
3071  for(dir=0; dir<2; dir++){
3072  for(i=0; i<2; i++){
3073  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3074  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3075  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3076  }
3077  }
3078  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3079  &dmin, &next_block, 0, 0);
3080  }
3082  s->mv_dir = 0;
3083  s->mv_type = MV_TYPE_16X16;
3084  s->mb_intra= 1;
3085  s->mv[0][0][0] = 0;
3086  s->mv[0][0][1] = 0;
3087  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3088  &dmin, &next_block, 0, 0);
3089  if(s->h263_pred || s->h263_aic){
3090  if(best_s.mb_intra)
3091  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3092  else
3093  ff_clean_intra_table_entries(s); //old mode?
3094  }
3095  }
3096 
3097  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3098  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3099  const int last_qp= backup_s.qscale;
3100  int qpi, qp, dc[6];
3101  int16_t ac[6][16];
3102  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3103  static const int dquant_tab[4]={-1,1,-2,2};
3104  int storecoefs = s->mb_intra && s->dc_val[0];
3105 
3106  av_assert2(backup_s.dquant == 0);
3107 
3108  //FIXME intra
3109  s->mv_dir= best_s.mv_dir;
3110  s->mv_type = MV_TYPE_16X16;
3111  s->mb_intra= best_s.mb_intra;
3112  s->mv[0][0][0] = best_s.mv[0][0][0];
3113  s->mv[0][0][1] = best_s.mv[0][0][1];
3114  s->mv[1][0][0] = best_s.mv[1][0][0];
3115  s->mv[1][0][1] = best_s.mv[1][0][1];
3116 
3117  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3118  for(; qpi<4; qpi++){
3119  int dquant= dquant_tab[qpi];
3120  qp= last_qp + dquant;
3121  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3122  continue;
3123  backup_s.dquant= dquant;
3124  if(storecoefs){
3125  for(i=0; i<6; i++){
3126  dc[i]= s->dc_val[0][ s->block_index[i] ];
3127  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3128  }
3129  }
3130 
3131  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3132  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3133  if(best_s.qscale != qp){
3134  if(storecoefs){
3135  for(i=0; i<6; i++){
3136  s->dc_val[0][ s->block_index[i] ]= dc[i];
3137  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3138  }
3139  }
3140  }
3141  }
3142  }
3143  }
3144  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3145  int mx= s->b_direct_mv_table[xy][0];
3146  int my= s->b_direct_mv_table[xy][1];
3147 
3148  backup_s.dquant = 0;
3149  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3150  s->mb_intra= 0;
3151  ff_mpeg4_set_direct_mv(s, mx, my);
3152  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3153  &dmin, &next_block, mx, my);
3154  }
3155  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3156  backup_s.dquant = 0;
3157  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3158  s->mb_intra= 0;
3159  ff_mpeg4_set_direct_mv(s, 0, 0);
3160  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3161  &dmin, &next_block, 0, 0);
3162  }
3163  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3164  int coded=0;
3165  for(i=0; i<6; i++)
3166  coded |= s->block_last_index[i];
3167  if(coded){
3168  int mx,my;
3169  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3170  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3171  mx=my=0; //FIXME find the one we actually used
3172  ff_mpeg4_set_direct_mv(s, mx, my);
3173  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3174  mx= s->mv[1][0][0];
3175  my= s->mv[1][0][1];
3176  }else{
3177  mx= s->mv[0][0][0];
3178  my= s->mv[0][0][1];
3179  }
3180 
3181  s->mv_dir= best_s.mv_dir;
3182  s->mv_type = best_s.mv_type;
3183  s->mb_intra= 0;
3184 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3185  s->mv[0][0][1] = best_s.mv[0][0][1];
3186  s->mv[1][0][0] = best_s.mv[1][0][0];
3187  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3188  backup_s.dquant= 0;
3189  s->skipdct=1;
3190  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3191  &dmin, &next_block, mx, my);
3192  s->skipdct=0;
3193  }
3194  }
3195 
3196  s->current_picture.qscale_table[xy] = best_s.qscale;
3197 
3198  copy_context_after_encode(s, &best_s);
3199 
3200  pb_bits_count= put_bits_count(&s->pb);
3201  flush_put_bits(&s->pb);
3202  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3203  s->pb= backup_s.pb;
3204 
3205  if(s->data_partitioning){
3206  pb2_bits_count= put_bits_count(&s->pb2);
3207  flush_put_bits(&s->pb2);
3208  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3209  s->pb2= backup_s.pb2;
3210 
3211  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3212  flush_put_bits(&s->tex_pb);
3213  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3214  s->tex_pb= backup_s.tex_pb;
3215  }
3216  s->last_bits= put_bits_count(&s->pb);
3217 
3218  if (CONFIG_H263_ENCODER &&
3219  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3221 
3222  if(next_block==0){ //FIXME 16 vs linesize16
3223  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3224  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3225  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3226  }
3227 
3228  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3229  ff_mpv_reconstruct_mb(s, s->block);
3230  } else {
3231  int motion_x = 0, motion_y = 0;
3232  s->mv_type=MV_TYPE_16X16;
3233  // only one MB-Type possible
3234 
3235  switch(mb_type){
3237  s->mv_dir = 0;
3238  s->mb_intra= 1;
3239  motion_x= s->mv[0][0][0] = 0;
3240  motion_y= s->mv[0][0][1] = 0;
3241  break;
3243  s->mv_dir = MV_DIR_FORWARD;
3244  s->mb_intra= 0;
3245  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3246  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3247  break;
3249  s->mv_dir = MV_DIR_FORWARD;
3250  s->mv_type = MV_TYPE_FIELD;
3251  s->mb_intra= 0;
3252  for(i=0; i<2; i++){
3253  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3254  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3255  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3256  }
3257  break;
3259  s->mv_dir = MV_DIR_FORWARD;
3260  s->mv_type = MV_TYPE_8X8;
3261  s->mb_intra= 0;
3262  for(i=0; i<4; i++){
3263  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3264  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3265  }
3266  break;
3268  if (CONFIG_MPEG4_ENCODER) {
3270  s->mb_intra= 0;
3271  motion_x=s->b_direct_mv_table[xy][0];
3272  motion_y=s->b_direct_mv_table[xy][1];
3273  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3274  }
3275  break;
3277  if (CONFIG_MPEG4_ENCODER) {
3279  s->mb_intra= 0;
3280  ff_mpeg4_set_direct_mv(s, 0, 0);
3281  }
3282  break;
3284  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3285  s->mb_intra= 0;
3286  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3287  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3288  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3289  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3290  break;
3292  s->mv_dir = MV_DIR_BACKWARD;
3293  s->mb_intra= 0;
3294  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3295  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3296  break;
3298  s->mv_dir = MV_DIR_FORWARD;
3299  s->mb_intra= 0;
3300  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3301  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3302  break;
3304  s->mv_dir = MV_DIR_FORWARD;
3305  s->mv_type = MV_TYPE_FIELD;
3306  s->mb_intra= 0;
3307  for(i=0; i<2; i++){
3308  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3309  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3310  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3311  }
3312  break;
3314  s->mv_dir = MV_DIR_BACKWARD;
3315  s->mv_type = MV_TYPE_FIELD;
3316  s->mb_intra= 0;
3317  for(i=0; i<2; i++){
3318  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3319  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3320  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3321  }
3322  break;
3324  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3325  s->mv_type = MV_TYPE_FIELD;
3326  s->mb_intra= 0;
3327  for(dir=0; dir<2; dir++){
3328  for(i=0; i<2; i++){
3329  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3330  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3331  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3332  }
3333  }
3334  break;
3335  default:
3336  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3337  }
3338 
3339  encode_mb(s, motion_x, motion_y);
3340 
3341  // RAL: Update last macroblock type
3342  s->last_mv_dir = s->mv_dir;
3343 
3344  if (CONFIG_H263_ENCODER &&
3345  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3347 
3348  ff_mpv_reconstruct_mb(s, s->block);
3349  }
3350 
3351  /* clean the MV table in IPS frames for direct mode in B-frames */
3352  if(s->mb_intra /* && I,P,S_TYPE */){
3353  s->p_mv_table[xy][0]=0;
3354  s->p_mv_table[xy][1]=0;
3355  }
3356 
3357  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3358  int w= 16;
3359  int h= 16;
3360 
3361  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3362  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3363 
3364  s->current_picture.encoding_error[0] += sse(
3365  s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3366  s->dest[0], w, h, s->linesize);
3367  s->current_picture.encoding_error[1] += sse(
3368  s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3369  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3370  s->current_picture.encoding_error[2] += sse(
3371  s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3372  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3373  }
3374  if(s->loop_filter){
3375  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3377  }
3378  ff_dlog(s->avctx, "MB %d %d bits\n",
3379  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3380  }
3381  }
3382 
3383  //not beautiful here but we must write it before flushing so it has to be here
3384  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3386 
3387  write_slice_end(s);
3388 
3389  return 0;
3390 }
3391 
3392 #define MERGE(field) dst->field += src->field; src->field=0
3394  MERGE(me.scene_change_score);
3395  MERGE(me.mc_mb_var_sum_temp);
3396  MERGE(me.mb_var_sum_temp);
3397 }
3398 
3400  int i;
3401 
3402  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3403  MERGE(dct_count[1]);
3404  MERGE(mv_bits);
3405  MERGE(i_tex_bits);
3406  MERGE(p_tex_bits);
3407  MERGE(i_count);
3408  MERGE(skip_count);
3409  MERGE(misc_bits);
3413 
3414  if (dst->noise_reduction){
3415  for(i=0; i<64; i++){
3416  MERGE(dct_error_sum[0][i]);
3417  MERGE(dct_error_sum[1][i]);
3418  }
3419  }
3420 
3421  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3422  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3423  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3424  flush_put_bits(&dst->pb);
3425 }
3426 
3427 static int estimate_qp(MpegEncContext *s, int dry_run){
     /* Choose the quantiser / lambda for the picture about to be encoded.
      * Returns 0 on success, -1 when the rate controller reports a negative
      * (invalid) quality.
      * NOTE(review): this is a rendered listing; a few interior call lines
      * (original numbers 3444, 3450, 3453) were dropped by the extraction,
      * which is why the switch cases below appear to have empty bodies. */
3428  if (s->next_lambda){
     // A lambda was queued for this picture: apply it directly.
3429  s->current_picture_ptr->f->quality =
3430  s->current_picture.f->quality = s->next_lambda;
     // Only consume the queued value on a real run, not on a dry run.
3431  if(!dry_run) s->next_lambda= 0;
3432  } else if (!s->fixed_qscale) {
     // No fixed qscale: ask the rate controller for an estimate.
3433  int quality = ff_rate_estimate_qscale(s, dry_run);
3434  s->current_picture_ptr->f->quality =
3435  s->current_picture.f->quality = quality;
3436  if (s->current_picture.f->quality < 0)
3437  return -1;
3438  }
3439 
3440  if(s->adaptive_quant){
     // Per-macroblock adaptive quantisation: codec-specific qscale cleanup
     // happens per codec id (the actual calls were lost in this rendering —
     // see NOTE above; presumably the usual ff_clean_*_qscales helpers).
3441  switch(s->codec_id){
3442  case AV_CODEC_ID_MPEG4:
3443  if (CONFIG_MPEG4_ENCODER)
3445  break;
3446  case AV_CODEC_ID_H263:
3447  case AV_CODEC_ID_H263P:
3448  case AV_CODEC_ID_FLV1:
3449  if (CONFIG_H263_ENCODER)
3451  break;
3452  default:
3454  }
3455 
     // With adaptive quant, lambda is taken from the per-MB lambda table.
3456  s->lambda= s->lambda_table[0];
3457  //FIXME broken
3458  }else
3459  s->lambda = s->current_picture.f->quality;
3460  update_qscale(s);
3461  return 0;
3462 }
3463 
3464 /* must be called before writing the header */
3466  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3467  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3468 
3469  if(s->pict_type==AV_PICTURE_TYPE_B){
3470  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3471  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3472  }else{
3473  s->pp_time= s->time - s->last_non_b_time;
3474  s->last_non_b_time= s->time;
3475  av_assert1(s->picture_number==0 || s->pp_time > 0);
3476  }
3477 }
3478 
3480 {
3481  int i, ret;
3482  int bits;
3483  int context_count = s->slice_context_count;
3484 
3485  s->picture_number = picture_number;
3486 
3487  /* Reset the average MB variance */
3488  s->me.mb_var_sum_temp =
3489  s->me.mc_mb_var_sum_temp = 0;
3490 
3491  /* we need to initialize some time vars before we can encode B-frames */
3492  // RAL: Condition added for MPEG1VIDEO
3493  if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
3495  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3497 
3498  s->me.scene_change_score=0;
3499 
3500 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3501 
3502  if(s->pict_type==AV_PICTURE_TYPE_I){
3503  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3504  else s->no_rounding=0;
3505  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3506  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3507  s->no_rounding ^= 1;
3508  }
3509 
3510  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3511  if (estimate_qp(s,1) < 0)
3512  return -1;
3514  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3515  if(s->pict_type==AV_PICTURE_TYPE_B)
3516  s->lambda= s->last_lambda_for[s->pict_type];
3517  else
3518  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3519  update_qscale(s);
3520  }
3521 
3522  if (s->out_format != FMT_MJPEG) {
3523  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3524  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3525  s->q_chroma_intra_matrix = s->q_intra_matrix;
3526  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3527  }
3528 
3529  s->mb_intra=0; //for the rate distortion & bit compare functions
3530  for(i=1; i<context_count; i++){
3531  ret = ff_update_duplicate_context(s->thread_context[i], s);
3532  if (ret < 0)
3533  return ret;
3534  }
3535 
3536  if(ff_init_me(s)<0)
3537  return -1;
3538 
3539  /* Estimate motion for every MB */
3540  if(s->pict_type != AV_PICTURE_TYPE_I){
3541  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3542  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3543  if (s->pict_type != AV_PICTURE_TYPE_B) {
3544  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3545  s->me_pre == 2) {
3546  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3547  }
3548  }
3549 
3550  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3551  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3552  /* I-Frame */
3553  for(i=0; i<s->mb_stride*s->mb_height; i++)
3554  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3555 
3556  if(!s->fixed_qscale){
3557  /* finding spatial complexity for I-frame rate control */
3558  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3559  }
3560  }
3561  for(i=1; i<context_count; i++){
3562  merge_context_after_me(s, s->thread_context[i]);
3563  }
3564  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3565  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3566  emms_c();
3567 
3568  if (s->me.scene_change_score > s->scenechange_threshold &&
3569  s->pict_type == AV_PICTURE_TYPE_P) {
3570  s->pict_type= AV_PICTURE_TYPE_I;
3571  for(i=0; i<s->mb_stride*s->mb_height; i++)
3572  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3573  if(s->msmpeg4_version >= 3)
3574  s->no_rounding=1;
3575  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3576  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3577  }
3578 
3579  if(!s->umvplus){
3580  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3581  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3582 
3583  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3584  int a,b;
3585  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3586  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3587  s->f_code= FFMAX3(s->f_code, a, b);
3588  }
3589 
3591  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3592  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3593  int j;
3594  for(i=0; i<2; i++){
3595  for(j=0; j<2; j++)
3596  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3597  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3598  }
3599  }
3600  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3601  int a, b;
3602 
3603  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3604  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3605  s->f_code = FFMAX(a, b);
3606 
3607  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3608  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3609  s->b_code = FFMAX(a, b);
3610 
3611  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3612  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3613  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3614  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3615  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3616  int dir, j;
3617  for(dir=0; dir<2; dir++){
3618  for(i=0; i<2; i++){
3619  for(j=0; j<2; j++){
3622  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3623  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3624  }
3625  }
3626  }
3627  }
3628  }
3629  }
3630 
3631  if (estimate_qp(s, 0) < 0)
3632  return -1;
3633 
3634  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3635  s->pict_type == AV_PICTURE_TYPE_I &&
3636  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3637  s->qscale= 3; //reduce clipping problems
3638 
3639  if (s->out_format == FMT_MJPEG) {
3640  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3641  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3642 
3643  if (s->avctx->intra_matrix) {
3644  chroma_matrix =
3645  luma_matrix = s->avctx->intra_matrix;
3646  }
3647  if (s->avctx->chroma_intra_matrix)
3648  chroma_matrix = s->avctx->chroma_intra_matrix;
3649 
3650  /* for mjpeg, we do include qscale in the matrix */
3651  for(i=1;i<64;i++){
3652  int j = s->idsp.idct_permutation[i];
3653 
3654  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3655  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3656  }
3657  s->y_dc_scale_table=
3658  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3659  s->chroma_intra_matrix[0] =
3660  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3661  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3662  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3663  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3664  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3665  s->qscale= 8;
3666 
3667  if (s->codec_id == AV_CODEC_ID_AMV) {
3668  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3669  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3670  for (int i = 1; i < 64; i++) {
3671  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3672 
3673  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3674  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3675  }
3676  s->y_dc_scale_table = y;
3677  s->c_dc_scale_table = c;
3678  s->intra_matrix[0] = 13;
3679  s->chroma_intra_matrix[0] = 14;
3680  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3681  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3682  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3683  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3684  s->qscale = 8;
3685  }
3686  } else if (s->out_format == FMT_SPEEDHQ) {
3687  s->y_dc_scale_table=
3688  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3689  }
3690 
3691  //FIXME var duplication
3692  s->current_picture_ptr->f->key_frame =
3693  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3694  s->current_picture_ptr->f->pict_type =
3695  s->current_picture.f->pict_type = s->pict_type;
3696 
3697  if (s->current_picture.f->key_frame)
3698  s->picture_in_gop_number=0;
3699 
3700  s->mb_x = s->mb_y = 0;
3701  s->last_bits= put_bits_count(&s->pb);
3702  switch(s->out_format) {
3703 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3704  case FMT_MJPEG:
3706  break;
3707 #endif
3708  case FMT_SPEEDHQ:
3709  if (CONFIG_SPEEDHQ_ENCODER)
3711  break;
3712  case FMT_H261:
3713  if (CONFIG_H261_ENCODER)
3715  break;
3716  case FMT_H263:
3717  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3719  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3721  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3723  if (ret < 0)
3724  return ret;
3725  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3727  if (ret < 0)
3728  return ret;
3729  }
3730  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3732  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3734  else if (CONFIG_H263_ENCODER)
3736  break;
3737  case FMT_MPEG1:
3738  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3740  break;
3741  default:
3742  av_assert0(0);
3743  }
3744  bits= put_bits_count(&s->pb);
3745  s->header_bits= bits - s->last_bits;
3746 
3747  for(i=1; i<context_count; i++){
3748  update_duplicate_context_after_me(s->thread_context[i], s);
3749  }
3750  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3751  for(i=1; i<context_count; i++){
3752  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3753  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3754  merge_context_after_encode(s, s->thread_context[i]);
3755  }
3756  emms_c();
3757  return 0;
3758 }
3759 
3760 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3761  const int intra= s->mb_intra;
3762  int i;
3763 
3764  s->dct_count[intra]++;
3765 
3766  for(i=0; i<64; i++){
3767  int level= block[i];
3768 
3769  if(level){
3770  if(level>0){
3771  s->dct_error_sum[intra][i] += level;
3772  level -= s->dct_offset[intra][i];
3773  if(level<0) level=0;
3774  }else{
3775  s->dct_error_sum[intra][i] -= level;
3776  level += s->dct_offset[intra][i];
3777  if(level>0) level=0;
3778  }
3779  block[i]= level;
3780  }
3781  }
3782 }
3783 
3785  int16_t *block, int n,
3786  int qscale, int *overflow){
3787  const int *qmat;
3788  const uint16_t *matrix;
3789  const uint8_t *scantable;
3790  const uint8_t *perm_scantable;
3791  int max=0;
3792  unsigned int threshold1, threshold2;
3793  int bias=0;
3794  int run_tab[65];
3795  int level_tab[65];
3796  int score_tab[65];
3797  int survivor[65];
3798  int survivor_count;
3799  int last_run=0;
3800  int last_level=0;
3801  int last_score= 0;
3802  int last_i;
3803  int coeff[2][64];
3804  int coeff_count[64];
3805  int qmul, qadd, start_i, last_non_zero, i, dc;
3806  const int esc_length= s->ac_esc_length;
3807  uint8_t * length;
3808  uint8_t * last_length;
3809  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3810  int mpeg2_qscale;
3811 
3812  s->fdsp.fdct(block);
3813 
3814  if(s->dct_error_sum)
3815  s->denoise_dct(s, block);
3816  qmul= qscale*16;
3817  qadd= ((qscale-1)|1)*8;
3818 
3819  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3820  else mpeg2_qscale = qscale << 1;
3821 
3822  if (s->mb_intra) {
3823  int q;
3824  scantable= s->intra_scantable.scantable;
3825  perm_scantable= s->intra_scantable.permutated;
3826  if (!s->h263_aic) {
3827  if (n < 4)
3828  q = s->y_dc_scale;
3829  else
3830  q = s->c_dc_scale;
3831  q = q << 3;
3832  } else{
3833  /* For AIC we skip quant/dequant of INTRADC */
3834  q = 1 << 3;
3835  qadd=0;
3836  }
3837 
3838  /* note: block[0] is assumed to be positive */
3839  block[0] = (block[0] + (q >> 1)) / q;
3840  start_i = 1;
3841  last_non_zero = 0;
3842  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3843  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3844  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3845  bias= 1<<(QMAT_SHIFT-1);
3846 
3847  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3848  length = s->intra_chroma_ac_vlc_length;
3849  last_length= s->intra_chroma_ac_vlc_last_length;
3850  } else {
3851  length = s->intra_ac_vlc_length;
3852  last_length= s->intra_ac_vlc_last_length;
3853  }
3854  } else {
3855  scantable= s->inter_scantable.scantable;
3856  perm_scantable= s->inter_scantable.permutated;
3857  start_i = 0;
3858  last_non_zero = -1;
3859  qmat = s->q_inter_matrix[qscale];
3860  matrix = s->inter_matrix;
3861  length = s->inter_ac_vlc_length;
3862  last_length= s->inter_ac_vlc_last_length;
3863  }
3864  last_i= start_i;
3865 
3866  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3867  threshold2= (threshold1<<1);
3868 
3869  for(i=63; i>=start_i; i--) {
3870  const int j = scantable[i];
3871  int level = block[j] * qmat[j];
3872 
3873  if(((unsigned)(level+threshold1))>threshold2){
3874  last_non_zero = i;
3875  break;
3876  }
3877  }
3878 
3879  for(i=start_i; i<=last_non_zero; i++) {
3880  const int j = scantable[i];
3881  int level = block[j] * qmat[j];
3882 
3883 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3884 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3885  if(((unsigned)(level+threshold1))>threshold2){
3886  if(level>0){
3887  level= (bias + level)>>QMAT_SHIFT;
3888  coeff[0][i]= level;
3889  coeff[1][i]= level-1;
3890 // coeff[2][k]= level-2;
3891  }else{
3892  level= (bias - level)>>QMAT_SHIFT;
3893  coeff[0][i]= -level;
3894  coeff[1][i]= -level+1;
3895 // coeff[2][k]= -level+2;
3896  }
3897  coeff_count[i]= FFMIN(level, 2);
3898  av_assert2(coeff_count[i]);
3899  max |=level;
3900  }else{
3901  coeff[0][i]= (level>>31)|1;
3902  coeff_count[i]= 1;
3903  }
3904  }
3905 
3906  *overflow= s->max_qcoeff < max; //overflow might have happened
3907 
3908  if(last_non_zero < start_i){
3909  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3910  return last_non_zero;
3911  }
3912 
3913  score_tab[start_i]= 0;
3914  survivor[0]= start_i;
3915  survivor_count= 1;
3916 
3917  for(i=start_i; i<=last_non_zero; i++){
3918  int level_index, j, zero_distortion;
3919  int dct_coeff= FFABS(block[ scantable[i] ]);
3920  int best_score=256*256*256*120;
3921 
3922  if (s->fdsp.fdct == ff_fdct_ifast)
3923  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3924  zero_distortion= dct_coeff*dct_coeff;
3925 
3926  for(level_index=0; level_index < coeff_count[i]; level_index++){
3927  int distortion;
3928  int level= coeff[level_index][i];
3929  const int alevel= FFABS(level);
3930  int unquant_coeff;
3931 
3932  av_assert2(level);
3933 
3934  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3935  unquant_coeff= alevel*qmul + qadd;
3936  } else if(s->out_format == FMT_MJPEG) {
3937  j = s->idsp.idct_permutation[scantable[i]];
3938  unquant_coeff = alevel * matrix[j] * 8;
3939  }else{ // MPEG-1
3940  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3941  if(s->mb_intra){
3942  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
3943  unquant_coeff = (unquant_coeff - 1) | 1;
3944  }else{
3945  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
3946  unquant_coeff = (unquant_coeff - 1) | 1;
3947  }
3948  unquant_coeff<<= 3;
3949  }
3950 
3951  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3952  level+=64;
3953  if((level&(~127)) == 0){
3954  for(j=survivor_count-1; j>=0; j--){
3955  int run= i - survivor[j];
3956  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3957  score += score_tab[i-run];
3958 
3959  if(score < best_score){
3960  best_score= score;
3961  run_tab[i+1]= run;
3962  level_tab[i+1]= level-64;
3963  }
3964  }
3965 
3966  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3967  for(j=survivor_count-1; j>=0; j--){
3968  int run= i - survivor[j];
3969  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3970  score += score_tab[i-run];
3971  if(score < last_score){
3972  last_score= score;
3973  last_run= run;
3974  last_level= level-64;
3975  last_i= i+1;
3976  }
3977  }
3978  }
3979  }else{
3980  distortion += esc_length*lambda;
3981  for(j=survivor_count-1; j>=0; j--){
3982  int run= i - survivor[j];
3983  int score= distortion + score_tab[i-run];
3984 
3985  if(score < best_score){
3986  best_score= score;
3987  run_tab[i+1]= run;
3988  level_tab[i+1]= level-64;
3989  }
3990  }
3991 
3992  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3993  for(j=survivor_count-1; j>=0; j--){
3994  int run= i - survivor[j];
3995  int score= distortion + score_tab[i-run];
3996  if(score < last_score){
3997  last_score= score;
3998  last_run= run;
3999  last_level= level-64;
4000  last_i= i+1;
4001  }
4002  }
4003  }
4004  }
4005  }
4006 
4007  score_tab[i+1]= best_score;
4008 
4009  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4010  if(last_non_zero <= 27){
4011  for(; survivor_count; survivor_count--){
4012  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4013  break;
4014  }
4015  }else{
4016  for(; survivor_count; survivor_count--){
4017  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4018  break;
4019  }
4020  }
4021 
4022  survivor[ survivor_count++ ]= i+1;
4023  }
4024 
4025  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4026  last_score= 256*256*256*120;
4027  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4028  int score= score_tab[i];
4029  if (i)
4030  score += lambda * 2; // FIXME more exact?
4031 
4032  if(score < last_score){
4033  last_score= score;
4034  last_i= i;
4035  last_level= level_tab[i];
4036  last_run= run_tab[i];
4037  }
4038  }
4039  }
4040 
4041  s->coded_score[n] = last_score;
4042 
4043  dc= FFABS(block[0]);
4044  last_non_zero= last_i - 1;
4045  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4046 
4047  if(last_non_zero < start_i)
4048  return last_non_zero;
4049 
4050  if(last_non_zero == 0 && start_i == 0){
4051  int best_level= 0;
4052  int best_score= dc * dc;
4053 
4054  for(i=0; i<coeff_count[0]; i++){
4055  int level= coeff[i][0];
4056  int alevel= FFABS(level);
4057  int unquant_coeff, score, distortion;
4058 
4059  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4060  unquant_coeff= (alevel*qmul + qadd)>>3;
4061  } else{ // MPEG-1
4062  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4063  unquant_coeff = (unquant_coeff - 1) | 1;
4064  }
4065  unquant_coeff = (unquant_coeff + 4) >> 3;
4066  unquant_coeff<<= 3 + 3;
4067 
4068  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4069  level+=64;
4070  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4071  else score= distortion + esc_length*lambda;
4072 
4073  if(score < best_score){
4074  best_score= score;
4075  best_level= level - 64;
4076  }
4077  }
4078  block[0]= best_level;
4079  s->coded_score[n] = best_score - dc*dc;
4080  if(best_level == 0) return -1;
4081  else return last_non_zero;
4082  }
4083 
4084  i= last_i;
4085  av_assert2(last_level);
4086 
4087  block[ perm_scantable[last_non_zero] ]= last_level;
4088  i -= last_run + 1;
4089 
4090  for(; i>start_i; i -= run_tab[i] + 1){
4091  block[ perm_scantable[i-1] ]= level_tab[i];
4092  }
4093 
4094  return last_non_zero;
4095 }
4096 
4097 static int16_t basis[64][64];
4098 
4099 static void build_basis(uint8_t *perm){
4100  int i, j, x, y;
4101  emms_c();
4102  for(i=0; i<8; i++){
4103  for(j=0; j<8; j++){
4104  for(y=0; y<8; y++){
4105  for(x=0; x<8; x++){
4106  double s= 0.25*(1<<BASIS_SHIFT);
4107  int index= 8*i + j;
4108  int perm_index= perm[index];
4109  if(i==0) s*= sqrt(0.5);
4110  if(j==0) s*= sqrt(0.5);
4111  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4112  }
4113  }
4114  }
4115  }
4116 }
4117 
/**
 * Rate-distortion refinement of an already-quantized 8x8 block
 * (quantizer noise shaping).  Repeatedly tries changing single
 * coefficients by +-1 and, per iteration, keeps the one change with the
 * best combined score of rate (VLC bit-length delta, scaled by lambda)
 * and distortion (try_8x8basis() on the reconstruction residual);
 * stops when no single change improves the score.
 *
 * @param block  quantized coefficients in IDCT-permuted order; modified
 *               in place
 * @param weight per-coefficient noise-shaping weights; OVERWRITTEN here
 *               with values remapped into the 16..63 range
 * @param orig   target spatial-domain values for this block
 * @param n      block index; n < 4 selects the luma DC scale, otherwise
 *               chroma (and n > 3 may select chroma AC VLC tables)
 * @param qscale quantizer scale used for AC dequantization
 * @return new index of the last non-zero coefficient in scan order
 */
4118 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4119  int16_t *block, int16_t *weight, int16_t *orig,
4120  int n, int qscale){
4121  int16_t rem[64];
4122  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4123  const uint8_t *scantable;
4124  const uint8_t *perm_scantable;
4125 // unsigned int threshold1, threshold2;
4126 // int bias=0;
4127  int run_tab[65];
4128  int prev_run=0;
4129  int prev_level=0;
4130  int qmul, qadd, start_i, last_non_zero, i, dc;
4131  uint8_t * length;
4132  uint8_t * last_length;
4133  int lambda;
4134  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4135 
     /* Lazily build the (IDCT-permuted) DCT basis tables on first use. */
4136  if(basis[0][0] == 0)
4137  build_basis(s->idsp.idct_permutation);
4138 
     /* Dequantization constants: level -> level*qmul +- qadd for AC. */
4139  qmul= qscale*2;
4140  qadd= (qscale-1)|1;
4141  if (s->mb_intra) {
4142  scantable= s->intra_scantable.scantable;
4143  perm_scantable= s->intra_scantable.permutated;
4144  if (!s->h263_aic) {
4145  if (n < 4)
4146  q = s->y_dc_scale;
4147  else
4148  q = s->c_dc_scale;
4149  } else{
4150  /* For AIC we skip quant/dequant of INTRADC */
4151  q = 1;
4152  qadd=0;
4153  }
4154  q <<= RECON_SHIFT-3;
4155  /* note: block[0] is assumed to be positive */
4156  dc= block[0]*q;
4157 // block[0] = (block[0] + (q >> 1)) / q;
4158  start_i = 1;
4159 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4160 // bias= 1<<(QMAT_SHIFT-1);
     /* Pick the VLC length tables used to estimate the rate cost. */
4161  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4162  length = s->intra_chroma_ac_vlc_length;
4163  last_length= s->intra_chroma_ac_vlc_last_length;
4164  } else {
4165  length = s->intra_ac_vlc_length;
4166  last_length= s->intra_ac_vlc_last_length;
4167  }
4168  } else {
4169  scantable= s->inter_scantable.scantable;
4170  perm_scantable= s->inter_scantable.permutated;
4171  dc= 0;
4172  start_i = 0;
4173  length = s->inter_ac_vlc_length;
4174  last_length= s->inter_ac_vlc_last_length;
4175  }
4176  last_non_zero = s->block_last_index[n];
4177 
     /* rem[] starts as (rounded DC reconstruction) minus the target, in
      * RECON_SHIFT fixed point; AC contributions are added below. */
4178  dc += (1<<(RECON_SHIFT-1));
4179  for(i=0; i<64; i++){
4180  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4181  }
4182 
     /* Remap the noise-shaping weights into 16..63 and accumulate the
      * sum of squares used to scale lambda. */
4183  sum=0;
4184  for(i=0; i<64; i++){
4185  int one= 36;
4186  int qns=4;
4187  int w;
4188 
4189  w= FFABS(weight[i]) + qns*one;
4190  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4191 
4192  weight[i] = w;
4193 // w=weight[i] = (63*qns + (w/2)) / w;
4194 
4195  av_assert2(w>0);
4196  av_assert2(w<(1<<6));
4197  sum += w*w;
4198  }
4199  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4200 
     /* Record the zero-run preceding each nonzero coefficient in
      * run_tab[] and fold every dequantized AC coefficient into rem[]. */
4201  run=0;
4202  rle_index=0;
4203  for(i=start_i; i<=last_non_zero; i++){
4204  int j= perm_scantable[i];
4205  const int level= block[j];
4206  int coeff;
4207 
4208  if(level){
4209  if(level<0) coeff= qmul*level - qadd;
4210  else coeff= qmul*level + qadd;
4211  run_tab[rle_index++]=run;
4212  run=0;
4213 
4214  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4215  }else{
4216  run++;
4217  }
4218  }
4219 
     /* Greedy refinement: each pass evaluates every +-1 candidate change
      * and applies only the single best one; repeat until no candidate
      * beats the "change nothing" score. */
4220  for(;;){
4221  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4222  int best_coeff=0;
4223  int best_change=0;
4224  int run2, best_unquant_change=0, analyze_gradient;
4225  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4226 
     /* Gradient = DCT of the weighted residual; used below to reject
      * new coefficients whose sign would increase the error. */
4227  if(analyze_gradient){
4228  for(i=0; i<64; i++){
4229  int w= weight[i];
4230 
4231  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4232  }
4233  s->fdsp.fdct(d1);
4234  }
4235 
     /* Intra DC: quantized with q (not qmul/qadd); try level +- 1. */
4236  if(start_i){
4237  const int level= block[0];
4238  int change, old_coeff;
4239 
4240  av_assert2(s->mb_intra);
4241 
4242  old_coeff= q*level;
4243 
4244  for(change=-1; change<=1; change+=2){
4245  int new_level= level + change;
4246  int score, new_coeff;
4247 
4248  new_coeff= q*new_level;
4249  if(new_coeff >= 2048 || new_coeff < 0)
4250  continue;
4251 
4252  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4253  new_coeff - old_coeff);
4254  if(score<best_score){
4255  best_score= score;
4256  best_coeff= 0;
4257  best_change= change;
4258  best_unquant_change= new_coeff - old_coeff;
4259  }
4260  }
4261  }
4262 
     /* AC coefficients: walk the scan order, tracking the zero-run state
      * so the VLC (run,level) rate deltas can be computed per change. */
4263  run=0;
4264  rle_index=0;
4265  run2= run_tab[rle_index++];
4266  prev_level=0;
4267  prev_run=0;
4268 
4269  for(i=start_i; i<64; i++){
4270  int j= perm_scantable[i];
4271  const int level= block[j];
4272  int change, old_coeff;
4273 
4274  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4275  break;
4276 
4277  if(level){
4278  if(level<0) old_coeff= qmul*level - qadd;
4279  else old_coeff= qmul*level + qadd;
4280  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4281  }else{
4282  old_coeff=0;
4283  run2--;
4284  av_assert2(run2>=0 || i >= last_non_zero );
4285  }
4286 
4287  for(change=-1; change<=1; change+=2){
4288  int new_level= level + change;
4289  int score, new_coeff, unquant_change;
4290 
4291  score=0;
4292  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4293  continue;
4294 
4295  if(new_level){
4296  if(new_level<0) new_coeff= qmul*new_level - qadd;
4297  else new_coeff= qmul*new_level + qadd;
4298  if(new_coeff >= 2048 || new_coeff <= -2048)
4299  continue;
4300  //FIXME check for overflow
4301 
4302  if(level){
     /* Nonzero -> nonzero: rate delta is just the length difference
      * of the (run,level) code. */
4303  if(level < 63 && level > -63){
4304  if(i < last_non_zero)
4305  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4306  - length[UNI_AC_ENC_INDEX(run, level+64)];
4307  else
4308  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4309  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4310  }
4311  }else{
     /* Zero -> +-1: inserting a coefficient splits the current run. */
4312  av_assert2(FFABS(new_level)==1);
4313 
4314  if(analyze_gradient){
4315  int g= d1[ scantable[i] ];
4316  if(g && (g^new_level) >= 0)
4317  continue;
4318  }
4319 
4320  if(i < last_non_zero){
4321  int next_i= i + run2 + 1;
4322  int next_level= block[ perm_scantable[next_i] ] + 64;
4323 
4324  if(next_level&(~127))
4325  next_level= 0;
4326 
4327  if(next_i < last_non_zero)
4328  score += length[UNI_AC_ENC_INDEX(run, 65)]
4329  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4330  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4331  else
4332  score += length[UNI_AC_ENC_INDEX(run, 65)]
4333  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4334  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4335  }else{
4336  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4337  if(prev_level){
4338  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4339  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4340  }
4341  }
4342  }
4343  }else{
     /* +-1 -> zero: removing a coefficient merges two runs. */
4344  new_coeff=0;
4345  av_assert2(FFABS(level)==1);
4346 
4347  if(i < last_non_zero){
4348  int next_i= i + run2 + 1;
4349  int next_level= block[ perm_scantable[next_i] ] + 64;
4350 
4351  if(next_level&(~127))
4352  next_level= 0;
4353 
4354  if(next_i < last_non_zero)
4355  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4356  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4357  - length[UNI_AC_ENC_INDEX(run, 65)];
4358  else
4359  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4360  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4361  - length[UNI_AC_ENC_INDEX(run, 65)];
4362  }else{
4363  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4364  if(prev_level){
4365  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4366  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4367  }
4368  }
4369  }
4370 
     /* Total score = lambda * (rate delta in bits) + distortion delta. */
4371  score *= lambda;
4372 
4373  unquant_change= new_coeff - old_coeff;
4374  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4375 
4376  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4377  unquant_change);
4378  if(score<best_score){
4379  best_score= score;
4380  best_coeff= i;
4381  best_change= change;
4382  best_unquant_change= unquant_change;
4383  }
4384  }
4385  if(level){
4386  prev_level= level + 64;
4387  if(prev_level&(~127))
4388  prev_level= 0;
4389  prev_run= run;
4390  run=0;
4391  }else{
4392  run++;
4393  }
4394  }
4395 
     /* Apply the winning change, fix up last_non_zero, rebuild the run
      * table and update the residual; loop again.  No winner -> done. */
4396  if(best_change){
4397  int j= perm_scantable[ best_coeff ];
4398 
4399  block[j] += best_change;
4400 
4401  if(best_coeff > last_non_zero){
4402  last_non_zero= best_coeff;
4403  av_assert2(block[j]);
4404  }else{
4405  for(; last_non_zero>=start_i; last_non_zero--){
4406  if(block[perm_scantable[last_non_zero]])
4407  break;
4408  }
4409  }
4410 
4411  run=0;
4412  rle_index=0;
4413  for(i=start_i; i<=last_non_zero; i++){
4414  int j= perm_scantable[i];
4415  const int level= block[j];
4416 
4417  if(level){
4418  run_tab[rle_index++]=run;
4419  run=0;
4420  }else{
4421  run++;
4422  }
4423  }
4424 
4425  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4426  }else{
4427  break;
4428  }
4429  }
4430 
4431  return last_non_zero;
4432 }
4433 
/**
 * Reorder the coefficients of an 8x8 block according to an IDCT
 * permutation vector.
 *
 * @param block       coefficients to reorder; modified in place
 * @param permutation maps each coefficient position to its new position
 * @param scantable   scan order, used ONLY as a speed shortcut to limit
 *                    which positions are touched; the block is not
 *                    (inverse) converted to scantable order
 * @param last        index (in scantable order) of the last non-zero
 *                    coefficient; positions past it are left alone
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t stash[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* First pass: save every participating coefficient and clear its
     * slot, so destinations that receive nothing end up zero. */
    for (int k = 0; k <= last; k++) {
        const int pos = scantable[k];
        stash[pos] = block[pos];
        block[pos] = 0;
    }

    /* Second pass: scatter the saved values to their permuted homes. */
    for (int k = 0; k <= last; k++) {
        const int pos = scantable[k];
        block[permutation[pos]] = stash[pos];
    }
}
4469 
4471  int16_t *block, int n,
4472  int qscale, int *overflow)
4473 {
     /* NOTE(review): the opening line of this definition (the function
      * name and the MpegEncContext *s parameter) was lost from this
      * extract; in upstream FFmpeg this is dct_quantize_c() — confirm.
      *
      * Forward-DCTs the block in place, then quantizes it with a
      * dead-zone quantizer, zeroing sub-threshold coefficients.
      * Returns the index of the last non-zero coefficient in scan
      * order; *overflow is set if any level exceeds s->max_qcoeff. */
4474  int i, j, level, last_non_zero, q, start_i;
4475  const int *qmat;
4476  const uint8_t *scantable;
4477  int bias;
4478  int max=0;
4479  unsigned int threshold1, threshold2;
4480 
     /* Forward DCT, done in place on the spatial-domain block. */
4481  s->fdsp.fdct(block);
4482 
     /* Optional DCT-domain denoising when error statistics are kept. */
4483  if(s->dct_error_sum)
4484  s->denoise_dct(s, block);
4485 
4486  if (s->mb_intra) {
4487  scantable= s->intra_scantable.scantable;
4488  if (!s->h263_aic) {
4489  if (n < 4)
4490  q = s->y_dc_scale;
4491  else
4492  q = s->c_dc_scale;
4493  q = q << 3;
4494  } else
4495  /* For AIC we skip quant/dequant of INTRADC */
4496  q = 1 << 3;
4497 
4498  /* note: block[0] is assumed to be positive */
4499  block[0] = (block[0] + (q >> 1)) / q;
4500  start_i = 1;
4501  last_non_zero = 0;
4502  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4503  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4504  } else {
4505  scantable= s->inter_scantable.scantable;
4506  start_i = 0;
4507  last_non_zero = -1;
4508  qmat = s->q_inter_matrix[qscale];
4509  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4510  }
     /* Dead-zone thresholds: |level*qmat| must exceed threshold1 for a
      * coefficient to survive; the unsigned compare below tests both
      * signs in one branch. */
4511  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4512  threshold2= (threshold1<<1);
     /* Scan backwards to locate the last coefficient that quantizes to
      * nonzero; everything behind it is cleared immediately. */
4513  for(i=63;i>=start_i;i--) {
4514  j = scantable[i];
4515  level = block[j] * qmat[j];
4516 
4517  if(((unsigned)(level+threshold1))>threshold2){
4518  last_non_zero = i;
4519  break;
4520  }else{
4521  block[j]=0;
4522  }
4523  }
     /* Quantize the surviving range [start_i, last_non_zero]. */
4524  for(i=start_i; i<=last_non_zero; i++) {
4525  j = scantable[i];
4526  level = block[j] * qmat[j];
4527 
4528 // if( bias+level >= (1<<QMAT_SHIFT)
4529 // || bias-level >= (1<<QMAT_SHIFT)){
4530  if(((unsigned)(level+threshold1))>threshold2){
4531  if(level>0){
4532  level= (bias + level)>>QMAT_SHIFT;
4533  block[j]= level;
4534  }else{
4535  level= (bias - level)>>QMAT_SHIFT;
4536  block[j]= -level;
4537  }
4538  max |=level;
4539  }else{
4540  block[j]=0;
4541  }
4542  }
4543  *overflow= s->max_qcoeff < max; //overflow might have happened
4544 
4545  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4546  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4547  ff_block_permute(block, s->idsp.idct_permutation,
4548  scantable, last_non_zero);
4549 
4550  return last_non_zero;
4551 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:326
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:738
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:83
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:143
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:271
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:246
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:95
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:101
level
uint8_t level
Definition: svq3.c:206
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:379
av_clip
#define av_clip
Definition: common.h:95
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3465
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:139
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:390
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:189
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:93
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:221
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:219
mem_internal.h
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:507
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:254
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1305
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1221
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1649
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:131
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2662
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:602
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2409
matrix
Definition: vc1dsp.c:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:216
mpegvideoenc.h
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2558
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4445
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:784
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4097
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:874
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:163
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:163
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2611
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:708
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1570
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:244
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:28
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:380
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:312
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:476
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
INTERLACED_DCT
#define INTERLACED_DCT(s)
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:432
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:107
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:215
w
uint8_t w
Definition: llviddspenc.c:38
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:247
AVPacket::data
uint8_t * data
Definition: packet.h:374
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:993
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:369
AVOption
AVOption.
Definition: opt.h:251
encode.h
b
#define b
Definition: input.c:34
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:109
data
const char data[16]
Definition: mxf.c:143
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:195
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:207
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:114
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1371
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1673
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:372
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:39
max
#define max(a, b)
Definition: cuda_runtime.h:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:79
mathematics.h
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:471
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:253
Picture
Picture.
Definition: mpegpicture.h:46
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:100
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2592
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:134
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:862
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:98
wmv2enc.h
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1032
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1185
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:287
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:525
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:220
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:476
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:545
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MPEGVIDEO_MAX_PLANES
#define MPEGVIDEO_MAX_PLANES
Definition: mpegpicture.h:32
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:256
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:243
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:53
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:64
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:108
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:108
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:41
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:794
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1662
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:65
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3427
FDCTDSPContext
Definition: fdctdsp.h:26
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:194
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:196
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:872
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:398
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3393
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:190
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:37
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:45
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:452
fail
#define fail()
Definition: checkasm.h:131
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:138
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:105
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
get_intra_count
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:970
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1016
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:469
CONFIG_MSMPEG4_ENCODER
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4enc.h:48
perm
perm
Definition: f_perms.c:74
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:447
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:270
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:654
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:42
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:65
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:53
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:245
ff_sqrt
#define ff_sqrt
Definition: mathops.h:208
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:266
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:99
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:432
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:291
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:266
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2722
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:867
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:324
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:879
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1591
RateControlContext
rate control context.
Definition: ratecontrol.h:63
mpeg12.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:179
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:236
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2699
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:93
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4099
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:685
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:40
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
s
#define s(width, name)
Definition: cbs_vp9.c:256
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:449
encode_frame
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1193
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
Definition: mpegpicture.c:317
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
ff_mpeg2_dc_scale_table
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:241
g
const char * g
Definition: vf_curves.c:117
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:863
sse
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2537
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1489
direct
static void fn() direct(const ftype *in, const ctype *ir, int len, ftype *out)
Definition: afir_template.c:248
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
bits
uint8_t bits
Definition: vp3data.h:141
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1448
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:220
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:64
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1214
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1397
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:104
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:729
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2636
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:89
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1199
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:462
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:383
sqr
static double sqr(double x)
Definition: af_adynamicequalizer.c:84
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:177
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:37
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:483
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:53
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:194
NULL
#define NULL
Definition: coverity.c:32
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg4videoenc.c:1058
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:596
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2679
run
uint8_t run
Definition: svq3.c:205
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:268
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:310
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:218
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:34
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:424
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:439
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:48
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:249
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:1709
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:274
mathops.h
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:283
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3392
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:784
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:680
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:925
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:863
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:115
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1249
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:67
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1322
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:253
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:120
AVOnce
#define AVOnce
Definition: thread.h:176
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1562
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:51
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:272
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:247
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:715
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:276
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3760
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:404
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1335
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:512
get_sae
static int get_sae(uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:956
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1256
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2017
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:228
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:108
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3399
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:270
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:81
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:249
AVPacket::size
int size
Definition: packet.h:375
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:377
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:584
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:187
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1001
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:343
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:1903
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:280
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1060
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:111
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:34
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:190
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:311
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:121
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:54
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:280
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:95
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:86
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:41
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:112
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:94
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:114
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:241
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:245
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:249
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:494
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:380
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:62
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:119
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:87
Picture::encoding_error
uint64_t encoding_error[MPEGVIDEO_MAX_PLANES]
Definition: mpegpicture.h:92
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1303
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:53
AVCodec::id
enum AVCodecID id
Definition: codec.h:210
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:192
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:140
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:450
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:593
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:134
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:240
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:454
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:55
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:448
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3784
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2756
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:35
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1214
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:890
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:382
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:109
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:268
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:48
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:119
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:816
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
get_visual_weight
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:1993
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:795
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2419
ff_h261_get_picture_format
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:51
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:305
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:246
AVCodecContext::height
int height
Definition: avcodec.h:562
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:357
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:671
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:102
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:110
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:218
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:288
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:125
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1300
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:110
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2486
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:105
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:873
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1698
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:193
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:451
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:736
AVCodecContext
main external API structure.
Definition: avcodec.h:389
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:92
Picture::shared
int shared
Definition: mpegpicture.h:90
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:69
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:884
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1361
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:370
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:327
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:97
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:230
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1178
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:47
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1360
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:722
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1533
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1649
temp
else temp
Definition: vf_mcdeint.c:248
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:82
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:857
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:101
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:84
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:103
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:865
shift
static int shift(int a, int b)
Definition: sonic.c:88
desc
const char * desc
Definition: libsvtav1.c:83
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:268
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
msmpeg4enc.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:661
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:536
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:108
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
skip_check
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
Definition: mpegvideo_enc.c:1152
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:311
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:530
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1228
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4118
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2447
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:242
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:989
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:864
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:1959
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:883
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:57
encode_picture
static int encode_picture(MpegEncContext *s, int picture_number)
Definition: mpegvideo_enc.c:3479
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
ff_h261_encode_init
av_cold void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:385
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:328
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:463
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:66
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:856
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:137
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:40
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:71
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4470
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:153
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:456
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:35
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:166
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:759
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:325
pixblockdsp.h
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:104
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1597
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1018
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:237
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:447
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2659
intmath.h