FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
43 #include "avcodec.h"
44 #include "dct.h"
45 #include "encode.h"
46 #include "idctdsp.h"
47 #include "mpeg12.h"
48 #include "mpegvideo.h"
49 #include "mpegvideodata.h"
50 #include "h261.h"
51 #include "h263.h"
52 #include "h263data.h"
53 #include "mjpegenc_common.h"
54 #include "mathops.h"
55 #include "mpegutils.h"
56 #include "mjpegenc.h"
57 #include "speedhqenc.h"
58 #include "msmpeg4.h"
59 #include "pixblockdsp.h"
60 #include "qpeldsp.h"
61 #include "faandct.h"
62 #include "thread.h"
63 #include "aandcttab.h"
64 #include "flv.h"
65 #include "mpeg4video.h"
66 #include "internal.h"
67 #include "bytestream.h"
68 #include "wmv2.h"
69 #include "rv10.h"
70 #include "packet_internal.h"
71 #include <limits.h>
72 #include "sp5x.h"
73 
74 #define QUANT_BIAS_SHIFT 8
75 
76 #define QMAT_SHIFT_MMX 16
77 #define QMAT_SHIFT 21
78 
80 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
81 static int sse_mb(MpegEncContext *s);
82 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
83 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
84 
85 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
86 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
87 
88 static const AVOption mpv_generic_options[] = {
90 #if FF_API_MPEGVIDEO_OPTS
95 #endif
96  { NULL },
97 };
98 
100  .class_name = "generic mpegvideo encoder",
101  .item_name = av_default_item_name,
102  .option = mpv_generic_options,
103  .version = LIBAVUTIL_VERSION_INT,
104 };
105 
/**
 * Build the per-qscale inverse-quantisation multiplier tables used by the
 * integer quantisers.
 *
 * For every qscale in [qmin, qmax], each entry of quant_matrix (addressed
 * through the IDCT permutation) is turned into a fixed-point reciprocal:
 *  - qmat:   32-bit multipliers at QMAT_SHIFT precision (scale adjusted for
 *            the pre-scaled ifast DCT via ff_aanscales),
 *  - qmat16: 16-bit multiplier/bias pairs at QMAT_SHIFT_MMX precision for
 *            the 16-bit quantiser, only filled in the generic (else) path.
 * 'bias' (QUANT_BIAS_SHIFT fixed point) becomes the rounding bias of the
 * 16-bit tables; 'intra' skips coefficient 0 (the DC term) in the final
 * overflow scan.  A warning is logged if the 32-bit table could overflow.
 */
106 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
107  uint16_t (*qmat16)[2][64],
108  const uint16_t *quant_matrix,
109  int bias, int qmin, int qmax, int intra)
110 {
111  FDCTDSPContext *fdsp = &s->fdsp;
112  int qscale;
113  int shift = 0;
114 
115  for (qscale = qmin; qscale <= qmax; qscale++) {
116  int i;
117  int qscale2;
118 
 /* qscale2 is the doubled quantiser step: taken from the MPEG-2
  * non-linear table when q_scale_type is set, otherwise 2 * qscale. */
119  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
120  else qscale2 = qscale << 1;
121 
 /* NOTE(review): the condition list continues on a line elided from
  * this view (presumably the 10-bit islow fdct) -- confirm upstream. */
122  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
123 #if CONFIG_FAANDCT
124  fdsp->fdct == ff_faandct ||
125 #endif /* CONFIG_FAANDCT */
127  for (i = 0; i < 64; i++) {
128  const int j = s->idsp.idct_permutation[i];
129  int64_t den = (int64_t) qscale2 * quant_matrix[j];
130  /* 16 <= qscale * quant_matrix[i] <= 7905
131  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
132  * 19952 <= x <= 249205026
133  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
134  * 3444240 >= (1 << 36) / (x) >= 275 */
135 
136  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
137  }
138  } else if (fdsp->fdct == ff_fdct_ifast) {
139  for (i = 0; i < 64; i++) {
140  const int j = s->idsp.idct_permutation[i];
141  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
142  /* 16 <= qscale * quant_matrix[i] <= 7905
143  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
144  * 19952 <= x <= 249205026
145  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
146  * 3444240 >= (1 << 36) / (x) >= 275 */
147 
148  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
149  }
150  } else {
151  for (i = 0; i < 64; i++) {
152  const int j = s->idsp.idct_permutation[i];
153  int64_t den = (int64_t) qscale2 * quant_matrix[j];
154  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
155  * Assume x = qscale * quant_matrix[i]
156  * So 16 <= x <= 7905
157  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
158  * so 32768 >= (1 << 19) / (x) >= 67 */
159  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
160  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
161  // (qscale * quant_matrix[i]);
162  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
163 
 /* keep the 16-bit multiplier strictly inside (0, 32768) so it
  * remains a usable non-zero 16-bit factor */
164  if (qmat16[qscale][0][i] == 0 ||
165  qmat16[qscale][0][i] == 128 * 256)
166  qmat16[qscale][0][i] = 128 * 256 - 1;
167  qmat16[qscale][1][i] =
168  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
169  qmat16[qscale][0][i]);
170  }
171  }
172 
 /* overflow scan: count how many extra bits of shift would be needed
  * for the largest level (8191, rescaled for ifast) times the table
  * entry to fit in an int; 'intra' skips the DC coefficient (i = 0). */
173  for (i = intra; i < 64; i++) {
174  int64_t max = 8191;
175  if (fdsp->fdct == ff_fdct_ifast) {
176  max = (8191LL * ff_aanscales[i]) >> 14;
177  }
178  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
179  shift++;
180  }
181  }
182  }
183  if (shift) {
184  av_log(s->avctx, AV_LOG_INFO,
185  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
186  QMAT_SHIFT - shift);
187  }
188 }
189 
/**
 * Derive the quantiser scale s->qscale from the current Lagrange
 * multiplier s->lambda, then refresh s->lambda2 (~ lambda squared).
 *
 * qscale is clamped to [avctx->qmin, avctx->qmax]; while
 * s->vbv_ignore_qmax is set the upper bound is relaxed to 31.
 */
190 static inline void update_qscale(MpegEncContext *s)
191 {
 /* NOTE(review): the '&& 0' permanently disables this non-linear search
  * branch, so the linear mapping in the else path is always taken. */
192  if (s->q_scale_type == 1 && 0) {
193  int i;
194  int bestdiff=INT_MAX;
195  int best = 1;
196 
 /* pick the non-linear table index whose weighted value is closest to
  * lambda * 139, honouring qmin and (unless vbv_ignore_qmax) qmax */
197  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
198  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
199  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
200  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
201  continue;
202  if (diff < bestdiff) {
203  bestdiff = diff;
204  best = i;
205  }
206  }
207  s->qscale = best;
208  } else {
 /* linear map: qscale ~ lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7), rounded */
209  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
210  (FF_LAMBDA_SHIFT + 7);
211  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
212  }
213 
 /* NOTE(review): the right-shift amount of this expression sits on a
  * line not visible in this view -- confirm against upstream. */
214  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
216 }
217 
218 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
219 {
220  int i;
221 
222  if (matrix) {
223  put_bits(pb, 1, 1);
224  for (i = 0; i < 64; i++) {
225  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
226  }
227  } else
228  put_bits(pb, 1, 0);
229 }
230 
231 /**
232  * init s->current_picture.qscale_table from s->lambda_table
233  */
235 {
236  int8_t * const qscale_table = s->current_picture.qscale_table;
237  int i;
238 
239  for (i = 0; i < s->mb_num; i++) {
240  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
241  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
242  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
243  s->avctx->qmax);
244  }
245 }
246 
249 {
250 #define COPY(a) dst->a= src->a
251  COPY(pict_type);
253  COPY(f_code);
254  COPY(b_code);
255  COPY(qscale);
256  COPY(lambda);
257  COPY(lambda2);
260  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
261  COPY(progressive_frame); // FIXME don't set in encode_header
262  COPY(partitioned_frame); // FIXME don't set in encode_header
263 #undef COPY
264 }
265 
266 static void mpv_encode_init_static(void)
267 {
268  for (int i = -16; i < 16; i++)
269  default_fcode_tab[i + MAX_MV] = 1;
270 }
271 
272 /**
273  * Set the given MpegEncContext to defaults for encoding.
274  * the changed fields will not depend upon the prior state of the MpegEncContext.
275  */
277 {
278  static AVOnce init_static_once = AV_ONCE_INIT;
279 
281 
282  ff_thread_once(&init_static_once, mpv_encode_init_static);
283 
284  s->me.mv_penalty = default_mv_penalty;
285  s->fcode_tab = default_fcode_tab;
286 
287  s->input_picture_number = 0;
288  s->picture_in_gop_number = 0;
289 }
290 
292 {
293  if (ARCH_X86)
295 
296  if (CONFIG_H263_ENCODER)
297  ff_h263dsp_init(&s->h263dsp);
298  if (!s->dct_quantize)
299  s->dct_quantize = ff_dct_quantize_c;
300  if (!s->denoise_dct)
301  s->denoise_dct = denoise_dct_c;
302  s->fast_dct_quantize = s->dct_quantize;
303  if (s->avctx->trellis)
304  s->dct_quantize = dct_quantize_trellis_c;
305 
306  return 0;
307 }
308 
309 /* init video encoder */
311 {
313  AVCPBProperties *cpb_props;
314  int i, ret;
315 
317 
318  switch (avctx->pix_fmt) {
319  case AV_PIX_FMT_YUVJ444P:
320  case AV_PIX_FMT_YUV444P:
321  s->chroma_format = CHROMA_444;
322  break;
323  case AV_PIX_FMT_YUVJ422P:
324  case AV_PIX_FMT_YUV422P:
325  s->chroma_format = CHROMA_422;
326  break;
327  case AV_PIX_FMT_YUVJ420P:
328  case AV_PIX_FMT_YUV420P:
329  default:
330  s->chroma_format = CHROMA_420;
331  break;
332  }
333 
335 
336  s->bit_rate = avctx->bit_rate;
337  s->width = avctx->width;
338  s->height = avctx->height;
339  if (avctx->gop_size > 600 &&
342  "keyframe interval too large!, reducing it from %d to %d\n",
343  avctx->gop_size, 600);
344  avctx->gop_size = 600;
345  }
346  s->gop_size = avctx->gop_size;
347  s->avctx = avctx;
349  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
350  "is %d.\n", MAX_B_FRAMES);
352  }
353  s->max_b_frames = avctx->max_b_frames;
354  s->codec_id = avctx->codec->id;
355  s->strict_std_compliance = avctx->strict_std_compliance;
356  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
357  s->rtp_mode = !!s->rtp_payload_size;
358  s->intra_dc_precision = avctx->intra_dc_precision;
359 
360  // workaround some differences between how applications specify dc precision
361  if (s->intra_dc_precision < 0) {
362  s->intra_dc_precision += 8;
363  } else if (s->intra_dc_precision >= 8)
364  s->intra_dc_precision -= 8;
365 
366  if (s->intra_dc_precision < 0) {
368  "intra dc precision must be positive, note some applications use"
369  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
370  return AVERROR(EINVAL);
371  }
372 
374  s->huffman = 0;
375 
376  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
377  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
378  return AVERROR(EINVAL);
379  }
380  s->user_specified_pts = AV_NOPTS_VALUE;
381 
382  if (s->gop_size <= 1) {
383  s->intra_only = 1;
384  s->gop_size = 12;
385  } else {
386  s->intra_only = 0;
387  }
388 
389  /* Fixed QSCALE */
390  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
391 
392  s->adaptive_quant = (avctx->lumi_masking ||
393  avctx->dark_masking ||
396  avctx->p_masking ||
397  s->border_masking ||
398  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
399  !s->fixed_qscale;
400 
401  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
402 
404  switch(avctx->codec_id) {
407  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
408  break;
409  case AV_CODEC_ID_MPEG4:
413  if (avctx->rc_max_rate >= 15000000) {
414  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
415  } else if(avctx->rc_max_rate >= 2000000) {
416  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
417  } else if(avctx->rc_max_rate >= 384000) {
418  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
419  } else
420  avctx->rc_buffer_size = 40;
421  avctx->rc_buffer_size *= 16384;
422  break;
423  }
424  if (avctx->rc_buffer_size) {
425  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
426  }
427  }
428 
429  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
430  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
431  return AVERROR(EINVAL);
432  }
433 
436  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
437  }
438 
440  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
441  return AVERROR(EINVAL);
442  }
443 
445  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
446  return AVERROR(EINVAL);
447  }
448 
449  if (avctx->rc_max_rate &&
453  "impossible bitrate constraints, this will fail\n");
454  }
455 
456  if (avctx->rc_buffer_size &&
457  avctx->bit_rate * (int64_t)avctx->time_base.num >
458  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
459  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
460  return AVERROR(EINVAL);
461  }
462 
463  if (!s->fixed_qscale &&
466  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
468  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
469  if (nbt <= INT_MAX) {
470  avctx->bit_rate_tolerance = nbt;
471  } else
472  avctx->bit_rate_tolerance = INT_MAX;
473  }
474 
475  if (avctx->rc_max_rate &&
477  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
478  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
479  90000LL * (avctx->rc_buffer_size - 1) >
480  avctx->rc_max_rate * 0xFFFFLL) {
482  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
483  "specified vbv buffer is too large for the given bitrate!\n");
484  }
485 
486  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
487  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
488  s->codec_id != AV_CODEC_ID_FLV1) {
489  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
490  return AVERROR(EINVAL);
491  }
492 
493  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
495  "OBMC is only supported with simple mb decision\n");
496  return AVERROR(EINVAL);
497  }
498 
499  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
500  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
501  return AVERROR(EINVAL);
502  }
503 
504  if (s->max_b_frames &&
505  s->codec_id != AV_CODEC_ID_MPEG4 &&
506  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
507  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
508  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
509  return AVERROR(EINVAL);
510  }
511  if (s->max_b_frames < 0) {
513  "max b frames must be 0 or positive for mpegvideo based encoders\n");
514  return AVERROR(EINVAL);
515  }
516 
517  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
518  s->codec_id == AV_CODEC_ID_H263 ||
519  s->codec_id == AV_CODEC_ID_H263P) &&
520  (avctx->sample_aspect_ratio.num > 255 ||
521  avctx->sample_aspect_ratio.den > 255)) {
523  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
527  }
528 
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P) &&
531  (avctx->width > 2048 ||
532  avctx->height > 1152 )) {
533  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
534  return AVERROR(EINVAL);
535  }
536  if ((s->codec_id == AV_CODEC_ID_H263 ||
537  s->codec_id == AV_CODEC_ID_H263P ||
538  s->codec_id == AV_CODEC_ID_RV20) &&
539  ((avctx->width &3) ||
540  (avctx->height&3) )) {
541  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if (s->codec_id == AV_CODEC_ID_RV10 &&
546  (avctx->width &15 ||
547  avctx->height&15 )) {
548  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
549  return AVERROR(EINVAL);
550  }
551 
552  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
553  s->codec_id == AV_CODEC_ID_WMV2) &&
554  avctx->width & 1) {
555  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
556  return AVERROR(EINVAL);
557  }
558 
560  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
561  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
562  return AVERROR(EINVAL);
563  }
564 
565  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
566  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
567  return AVERROR(EINVAL);
568  }
569 
570  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
572  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
573  return AVERROR(EINVAL);
574  }
575 
576  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
577  (s->codec_id == AV_CODEC_ID_AMV ||
578  s->codec_id == AV_CODEC_ID_MJPEG)) {
579  // Used to produce garbage with MJPEG.
581  "QP RD is no longer compatible with MJPEG or AMV\n");
582  return AVERROR(EINVAL);
583  }
584 
585  if (s->scenechange_threshold < 1000000000 &&
588  "closed gop with scene change detection are not supported yet, "
589  "set threshold to 1000000000\n");
590  return AVERROR_PATCHWELCOME;
591  }
592 
594  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
595  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
597  "low delay forcing is only available for mpeg2, "
598  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
599  return AVERROR(EINVAL);
600  }
601  if (s->max_b_frames != 0) {
603  "B-frames cannot be used with low delay\n");
604  return AVERROR(EINVAL);
605  }
606  }
607 
608  if (s->q_scale_type == 1) {
609  if (avctx->qmax > 28) {
611  "non linear quant only supports qmax <= 28 currently\n");
612  return AVERROR_PATCHWELCOME;
613  }
614  }
615 
616  if (avctx->slices > 1 &&
618  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
619  return AVERROR(EINVAL);
620  }
621 
622  if (avctx->thread_count > 1 &&
623  s->codec_id != AV_CODEC_ID_MPEG4 &&
624  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
625  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
626  s->codec_id != AV_CODEC_ID_MJPEG &&
627  (s->codec_id != AV_CODEC_ID_H263P)) {
629  "multi threaded encoding not supported by codec\n");
630  return AVERROR_PATCHWELCOME;
631  }
632 
633  if (avctx->thread_count < 1) {
635  "automatic thread number detection not supported by codec, "
636  "patch welcome\n");
637  return AVERROR_PATCHWELCOME;
638  }
639 
640  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
642  "notice: b_frame_strategy only affects the first pass\n");
643  s->b_frame_strategy = 0;
644  }
645 
647  if (i > 1) {
648  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
649  avctx->time_base.den /= i;
650  avctx->time_base.num /= i;
651  //return -1;
652  }
653 
654  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
655  // (a + x * 3 / 8) / x
656  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
657  s->inter_quant_bias = 0;
658  } else {
659  s->intra_quant_bias = 0;
660  // (a - x / 4) / x
661  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
662  }
663 
664  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
665  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
666  return AVERROR(EINVAL);
667  }
668 
669  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
670 
671  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
672  avctx->time_base.den > (1 << 16) - 1) {
674  "timebase %d/%d not supported by MPEG 4 standard, "
675  "the maximum admitted value for the timebase denominator "
676  "is %d\n", avctx->time_base.num, avctx->time_base.den,
677  (1 << 16) - 1);
678  return AVERROR(EINVAL);
679  }
680  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
681 
682  switch (avctx->codec->id) {
684  s->out_format = FMT_MPEG1;
685  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
686  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
687  break;
689  s->out_format = FMT_MPEG1;
690  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
691  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
692  s->rtp_mode = 1;
693  break;
694 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
695  case AV_CODEC_ID_MJPEG:
696  case AV_CODEC_ID_AMV:
697  s->out_format = FMT_MJPEG;
698  s->intra_only = 1; /* force intra only for jpeg */
699  if ((ret = ff_mjpeg_encode_init(s)) < 0)
700  return ret;
701  avctx->delay = 0;
702  s->low_delay = 1;
703  break;
704 #endif
705  case AV_CODEC_ID_SPEEDHQ:
706  s->out_format = FMT_SPEEDHQ;
707  s->intra_only = 1; /* force intra only for SHQ */
708  if (!CONFIG_SPEEDHQ_ENCODER)
710  if ((ret = ff_speedhq_encode_init(s)) < 0)
711  return ret;
712  avctx->delay = 0;
713  s->low_delay = 1;
714  break;
715  case AV_CODEC_ID_H261:
716  if (!CONFIG_H261_ENCODER)
718  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
720  "The specified picture size of %dx%d is not valid for the "
721  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
722  s->width, s->height);
723  return AVERROR(EINVAL);
724  }
725  s->out_format = FMT_H261;
726  avctx->delay = 0;
727  s->low_delay = 1;
728  s->rtp_mode = 0; /* Sliced encoding not supported */
729  break;
730  case AV_CODEC_ID_H263:
731  if (!CONFIG_H263_ENCODER)
734  s->width, s->height) == 8) {
736  "The specified picture size of %dx%d is not valid for "
737  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
738  "352x288, 704x576, and 1408x1152. "
739  "Try H.263+.\n", s->width, s->height);
740  return AVERROR(EINVAL);
741  }
742  s->out_format = FMT_H263;
743  avctx->delay = 0;
744  s->low_delay = 1;
745  break;
746  case AV_CODEC_ID_H263P:
747  s->out_format = FMT_H263;
748  s->h263_plus = 1;
749  /* Fx */
750  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
751  s->modified_quant = s->h263_aic;
752  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
753  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
754 
755  /* /Fx */
756  /* These are just to be sure */
757  avctx->delay = 0;
758  s->low_delay = 1;
759  break;
760  case AV_CODEC_ID_FLV1:
761  s->out_format = FMT_H263;
762  s->h263_flv = 2; /* format = 1; 11-bit codes */
763  s->unrestricted_mv = 1;
764  s->rtp_mode = 0; /* don't allow GOB */
765  avctx->delay = 0;
766  s->low_delay = 1;
767  break;
768  case AV_CODEC_ID_RV10:
769  s->out_format = FMT_H263;
770  avctx->delay = 0;
771  s->low_delay = 1;
772  break;
773  case AV_CODEC_ID_RV20:
774  s->out_format = FMT_H263;
775  avctx->delay = 0;
776  s->low_delay = 1;
777  s->modified_quant = 1;
778  s->h263_aic = 1;
779  s->h263_plus = 1;
780  s->loop_filter = 1;
781  s->unrestricted_mv = 0;
782  break;
783  case AV_CODEC_ID_MPEG4:
784  s->out_format = FMT_H263;
785  s->h263_pred = 1;
786  s->unrestricted_mv = 1;
787  s->low_delay = s->max_b_frames ? 0 : 1;
788  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
789  break;
791  s->out_format = FMT_H263;
792  s->h263_pred = 1;
793  s->unrestricted_mv = 1;
794  s->msmpeg4_version = 2;
795  avctx->delay = 0;
796  s->low_delay = 1;
797  break;
799  s->out_format = FMT_H263;
800  s->h263_pred = 1;
801  s->unrestricted_mv = 1;
802  s->msmpeg4_version = 3;
803  s->flipflop_rounding = 1;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  case AV_CODEC_ID_WMV1:
808  s->out_format = FMT_H263;
809  s->h263_pred = 1;
810  s->unrestricted_mv = 1;
811  s->msmpeg4_version = 4;
812  s->flipflop_rounding = 1;
813  avctx->delay = 0;
814  s->low_delay = 1;
815  break;
816  case AV_CODEC_ID_WMV2:
817  s->out_format = FMT_H263;
818  s->h263_pred = 1;
819  s->unrestricted_mv = 1;
820  s->msmpeg4_version = 5;
821  s->flipflop_rounding = 1;
822  avctx->delay = 0;
823  s->low_delay = 1;
824  break;
825  default:
826  return AVERROR(EINVAL);
827  }
828 
829  avctx->has_b_frames = !s->low_delay;
830 
831  s->encoding = 1;
832 
833  s->progressive_frame =
834  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
836  s->alternate_scan);
837 
838  /* init */
840  if ((ret = ff_mpv_common_init(s)) < 0)
841  return ret;
842 
843  ff_fdctdsp_init(&s->fdsp, avctx);
844  ff_me_cmp_init(&s->mecc, avctx);
845  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
846  ff_pixblockdsp_init(&s->pdsp, avctx);
847  ff_qpeldsp_init(&s->qdsp);
848 
849  if (s->msmpeg4_version) {
850  int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
851  if (!(s->ac_stats = av_mallocz(ac_stats_size)))
852  return AVERROR(ENOMEM);
853  }
854 
855  if (!(avctx->stats_out = av_mallocz(256)) ||
856  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
857  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
858  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
859  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
860  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
861  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
862  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
863  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
864  return AVERROR(ENOMEM);
865 
866  if (s->noise_reduction) {
867  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
868  return AVERROR(ENOMEM);
869  }
870 
872 
873  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
874  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
875 
876  if (s->slice_context_count > 1) {
877  s->rtp_mode = 1;
878 
880  s->h263_slice_structured = 1;
881  }
882 
883  s->quant_precision = 5;
884 
885  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
886  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
887 
888  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261) {
890  } else if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
891  && s->out_format == FMT_MPEG1) {
893  } else if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
895  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
897  }
898 
899  /* init q matrix */
900  for (i = 0; i < 64; i++) {
901  int j = s->idsp.idct_permutation[i];
902  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
903  s->mpeg_quant) {
904  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
905  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
906  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
907  s->intra_matrix[j] =
908  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
909  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
910  s->intra_matrix[j] =
911  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
912  } else {
913  /* MPEG-1/2 */
914  s->chroma_intra_matrix[j] =
915  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
916  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
917  }
918  if (avctx->intra_matrix)
919  s->intra_matrix[j] = avctx->intra_matrix[i];
920  if (avctx->inter_matrix)
921  s->inter_matrix[j] = avctx->inter_matrix[i];
922  }
923 
924  /* precompute matrix */
925  /* for mjpeg, we do include qscale in the matrix */
926  if (s->out_format != FMT_MJPEG) {
927  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
928  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
929  31, 1);
930  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
931  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
932  31, 0);
933  }
934 
935  if ((ret = ff_rate_control_init(s)) < 0)
936  return ret;
937 
938  if (s->b_frame_strategy == 2) {
939  for (i = 0; i < s->max_b_frames + 2; i++) {
940  s->tmp_frames[i] = av_frame_alloc();
941  if (!s->tmp_frames[i])
942  return AVERROR(ENOMEM);
943 
944  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
945  s->tmp_frames[i]->width = s->width >> s->brd_scale;
946  s->tmp_frames[i]->height = s->height >> s->brd_scale;
947 
948  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
949  if (ret < 0)
950  return ret;
951  }
952  }
953 
954  cpb_props = ff_add_cpb_side_data(avctx);
955  if (!cpb_props)
956  return AVERROR(ENOMEM);
957  cpb_props->max_bitrate = avctx->rc_max_rate;
958  cpb_props->min_bitrate = avctx->rc_min_rate;
959  cpb_props->avg_bitrate = avctx->bit_rate;
960  cpb_props->buffer_size = avctx->rc_buffer_size;
961 
962  return 0;
963 }
964 
966 {
968  int i;
969 
971 
973  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
974  s->out_format == FMT_MJPEG)
976 
977  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
978  av_frame_free(&s->tmp_frames[i]);
979 
980  ff_free_picture_tables(&s->new_picture);
981  ff_mpeg_unref_picture(avctx, &s->new_picture);
982 
984  av_freep(&s->ac_stats);
985 
986  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
987  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
988  s->q_chroma_intra_matrix= NULL;
989  s->q_chroma_intra_matrix16= NULL;
990  av_freep(&s->q_intra_matrix);
991  av_freep(&s->q_inter_matrix);
992  av_freep(&s->q_intra_matrix16);
993  av_freep(&s->q_inter_matrix16);
994  av_freep(&s->input_picture);
995  av_freep(&s->reordered_input_picture);
996  av_freep(&s->dct_offset);
997 
998  return 0;
999 }
1000 
/**
 * Sum of absolute differences between a 16x16 block and a constant
 * reference value.
 *
 * @param src    top-left sample of the 16x16 block
 * @param ref    constant value every sample is compared against
 * @param stride distance in bytes between vertically adjacent samples
 * @return accumulated absolute error over all 256 samples
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1014 
1015 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1016  uint8_t *ref, int stride)
1017 {
1018  int x, y, w, h;
1019  int acc = 0;
1020 
1021  w = s->width & ~15;
1022  h = s->height & ~15;
1023 
1024  for (y = 0; y < h; y += 16) {
1025  for (x = 0; x < w; x += 16) {
1026  int offset = x + y * stride;
1027  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1028  stride, 16);
1029  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1030  int sae = get_sae(src + offset, mean, stride);
1031 
1032  acc += sae + 500 < sad;
1033  }
1034  }
1035  return acc;
1036 }
1037 
/**
 * Thin wrapper around ff_alloc_picture() that supplies the encoder's
 * geometry (mb dimensions/strides, chroma shifts, output format) and its
 * motion-estimation / scratch contexts.  When 'shared' is non-zero the
 * Picture presumably wraps externally owned buffers instead of allocating
 * new ones -- confirm against ff_alloc_picture()'s contract.
 * s->linesize and s->uvlinesize are passed by address for the callee to
 * fill in.  Returns ff_alloc_picture()'s return value.
 */
1038 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1039 {
1040  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1041  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1042  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1043  &s->linesize, &s->uvlinesize);
1044 }
1045 
/**
 * Queue one user-supplied frame (or a flush request when pic_arg is NULL)
 * into s->input_picture[] for later reordering/encoding.
 *
 * When the caller's frame is suitably aligned and padded it is referenced
 * directly ("direct" path); otherwise its planes are copied into an
 * internally allocated picture, with bottom-edge padding drawn as needed.
 *
 * @param s       encoder context
 * @param pic_arg input frame, or NULL to signal end-of-stream flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* Number of frames the output lags the input: max_b_frames when
     * B-frames are enabled, otherwise 1 unless low_delay is set. */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* Timestamps must be strictly monotonic. */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* Remember the pts step of the first two frames so dts of
                 * the first packet can be back-dated (B-frame delay). */
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* No pts on the frame: extrapolate from the previous one, or
             * fall back to the display picture number. */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* The direct (zero-copy) path requires refcounted buffers with the
         * encoder's exact strides, mod-16 dimensions and aligned data. */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            /* Zero-copy: just take a reference on the caller's frame. */
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty -- the caller already wrote into our buffer in place
            } else {
                /* Plane-by-plane copy into the internal picture. */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* Interlaced MPEG-2 may need 32-line bottom padding. */
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        /* Strides differ: copy row by row. */
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    /* Extend the last rows/columns when dimensions are not
                     * a multiple of the macroblock padding. */
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    /* pic is NULL on flush, which clears the slot. */
    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1196 
/* Frame-skip decision: compare every 8x8 block of picture p against ref on
 * all three planes and aggregate the per-block comparison scores according
 * to |frame_skip_exp|. Returns 1 when the frame is similar enough to be
 * dropped, 0 otherwise. */
{
    int x, y, plane;
    int score = 0;
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        /* Luma spans 2 blocks per MB in each direction, chroma 1 (4:2:0). */
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                /* Internally allocated pictures carry edge padding that a
                 * shared (user) frame does not. */
                int off = p->shared ? 0 : 16;
                uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                /* Aggregation mode: 0 = max, 1 = sum of |v|,
                 * 2..4 = higher powers accumulated in 64 bits. */
                switch (FFABS(s->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    /* Fold the 32-bit accumulator into the 64-bit one. */
    if (score)
        score64 = score;
    /* Negative exponent: normalize per macroblock and take the root. */
    if (s->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
                      -1.0/s->frame_skip_exp);

    if (score64 < s->frame_skip_threshold)
        return 1;
    /* Second, lambda-scaled threshold. */
    if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1237 
/* Encode one frame with a helper encoder instance and return the total
 * size in bytes of all packets it produced, or a negative AVERROR code.
 * NOTE(review): the avcodec_send_frame()/avcodec_receive_packet() calls and
 * the per-packet unref were elided by the documentation extraction; only
 * the surrounding control flow is visible here. */
{
    int ret;
    int size = 0;

    /* (send the frame to the encoder -- call elided in this view) */
    if (ret < 0)
        return ret;

    do {
        /* (receive the next packet -- call elided in this view) */
        if (ret >= 0) {
            size += pkt->size;
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    return size;
}
1258 
/* Rate-distortion estimation of the best number of B-frames to insert
 * before the next P-frame (b_frame_strategy == 2): downscale the queued
 * input pictures, encode each candidate GOP structure j (j B-frames) with
 * a scratch encoder, and pick the structure with the lowest combined
 * bitrate/distortion cost. Returns the best count, or a negative error. */
{
    const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    /* Rounded square of b_lambda in FF_LAMBDA units. */
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
              FF_LAMBDA_SHIFT; /* NOTE(review): shift constant restored from context */

    /* Downscale the reference (i == 0) and the queued input pictures into
     * s->tmp_frames[] at 1/2^scale resolution. */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
                                                s->next_picture_ptr;
        uint8_t *data[4];

        if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
            pre_input = *pre_input_ptr;
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            /* Internally owned pictures store payload behind the offset. */
            if (!pre_input.shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input.f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input.f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input.f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* Try j = 0 .. max_b_frames B-frames between references. */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* (scratch context allocation elided in this view) */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width = width;
        c->height = height;
        /* (base flags assignment elided in this view) */
        c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision = s->avctx->mb_decision;
        c->me_cmp = s->avctx->me_cmp;
        c->mb_cmp = s->avctx->mb_cmp;
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0)
            goto fail;


        /* Seed the scratch encoder with an I-frame at fixed quality. */
        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        /* Encode the remaining frames with the candidate P/B pattern. */
        for (i = 0; i < s->max_b_frames + 1; i++) {
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            s->tmp_frames[i + 1]->pict_type = is_p ?
                                              AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B; /* NOTE(review): restored from context */
            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* (flush call elided in this view) */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        /* Add the encoder's accumulated SSE as the distortion term. */
        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        /* (context free / packet unref elided in this view) */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1405 
/* Choose the next picture to encode: decides its coding type (I/P/B),
 * applies frame skipping and B-frame strategies, reorders input pictures
 * into coding order, and sets up s->new_picture / s->current_picture_ptr.
 * Returns 0 on success or a negative error code. */
{
    int i, ret;

    /* Advance the reorder queue by one slot. */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        /* Optional frame skipping against the last reference. */
        if (s->frame_skip_threshold || s->frame_skip_factor) {
            if (s->picture_in_gop_number < s->gop_size &&
                s->next_picture_ptr &&
                skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                // FIXME check that the gop check above is +-1 correct
                av_frame_unref(s->input_picture[0]->f);

                ff_vbv_update(s, 0);

                goto no_output_pic;
            }
        }

        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            /* No reference yet (or intra-only codec): emit an I-frame. */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames = 0;

            /* Two-pass mode: take picture types from the stats file. */
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->b_frame_strategy == 0) {
                /* Fixed pattern: as many B-frames as are queued. */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->b_frame_strategy == 1) {
                /* Heuristic: count intra-looking MBs between neighbours. */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            /* NOTE(review): get_intra_count(...) call head elided here */
                            s->input_picture[i    ]->f->data[0],
                            s->input_picture[i - 1]->f->data[0],
                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->b_frame_strategy == 2) {
                /* Full RD search over candidate B-frame counts. */
                b_frames = estimate_best_b_count(s);
                if (b_frames < 0)
                    return b_frames;
            }

            emms_c();

            /* A forced non-B type inside the run truncates it. */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many B-frames in a row\n");
            }

            /* GOP boundary handling. */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* The anchor (I or P) is coded first, then the B-frames. */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f->pict_type =
                    AV_PICTURE_TYPE_B; /* NOTE(review): restored from context */
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    if (s->reordered_input_picture[0]) {
        /* B-frames are not used as references. */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                if (s->new_picture.f->data[i])
                    s->new_picture.f->data[i] += INPLACE_OFFSET;
            }
        }
        ff_mpeg_unref_picture(s->avctx, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

        s->picture_number = s->new_picture.f->display_picture_number;
    }
    return 0;
}
1582 
/* Finalize the just-encoded frame: pad the reconstructed reference picture
 * edges (needed for unrestricted motion vectors) and record per-type state
 * used by rate control for the next frame. */
{
    /* Only reference pictures of inter-coded streams need edge padding. */
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        !s->intra_only) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
                                s->current_picture.f->linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): EDGE_WIDTH arguments elided in this view */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
                                s->current_picture.f->linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
                                s->current_picture.f->linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* Remember per-type lambda for the next frame of the same type. */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1619 
/* Refresh the per-coefficient noise-reduction offset tables from the
 * accumulated DCT error statistics, separately for intra and inter blocks.
 * Statistics are halved once the sample count exceeds 2^16 to keep a
 * moving average and avoid overflow. */
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* Decay old statistics so recent frames dominate. */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        /* offset = noise_reduction * count / avg_error (rounded). */
        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1640 
/* Per-frame encoder setup: rotates the last/next/current reference
 * pictures, selects the dequantizer functions for the active codec, and
 * adjusts plane pointers/strides for field pictures.
 * Returns 0 on success or a negative error code. */
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* Reference frames advance only on non-B pictures. */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* Field pictures: double strides and point at the right field. */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i] *= 2;
            s->next_picture.f->linesize[i] *= 2;
        }
    }

    /* Pick the dequantizer matching the bitstream syntax. */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): update_noise_reduction(s) call elided in this view */
    }

    return 0;
}
1711 
                          const AVFrame *pic_arg, int *got_packet)
/* Top-level per-frame encode entry point: queues the input picture,
 * selects/reorders the picture to code, encodes it (re-encoding at higher
 * lambda on VBV overflow), applies stuffing, updates MPEG-1/2 vbv_delay
 * for CBR streams, and fills the output packet and timestamps.
 * Returns 0 on success (with *got_packet set) or a negative error code. */
{
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        /* Single-slice encodes can grow the internal byte buffer instead of
         * preallocating the worst case. */
        int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
        int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
                                              :
                                              s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 /* NOTE(review): side-data type argument elided in this view */
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* Partition the packet between slice-thread contexts by MB rows. */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        ret = encode_picture(s, s->picture_number);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): pkt->size update elided in this view */
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if the frame is too large, raise lambda and retry. */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* Rewind every slice context's bit writer. */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

        }


        /* Export per-plane error (PSNR) statistics. */
        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];
        }
        ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
                                       s->current_picture_ptr->encoding_error,
                                       (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
                                       s->pict_type);

        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
               s->misc_bits + s->i_tex_bits +
               s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* Pad the frame to satisfy the rate-control VBV model. */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* NOTE(review): MPEG-1/2 case labels elided in this view */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: resync-marker-like pattern then 0xFF. */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            /* NOTE(review): rc_min_rate equality check elided in this view */
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits = avctx->rc_max_rate *
                            /* NOTE(review): time factor elided in this view */
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                /* NOTE(review): av_log call head elided in this view */
                   "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* Patch the 16-bit vbv_delay field into the picture header. */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1] = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300; // 90 kHz -> 27 MHz units

            /* NOTE(review): av_packet_add_side_data call head elided */
                                                  (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits += s->frame_bits;

        /* Timestamps: dts trails pts by the B-frame delay. */
        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f->key_frame)
            /* NOTE(review): keyframe flag assignment elided in this view */
        if (s->mb_info)
            /* NOTE(review): mb_info side-data shrink elided in this view */
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(avctx, &s->picture[i]);
    }

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
1946 
                                                int n, int threshold)
{
    /* Heuristic single-coefficient elimination: if block n contains only a
     * few isolated +-1 coefficients whose position-weighted score stays
     * below |threshold|, zero the whole block (cheaper to code the skip
     * than the coefficients). A negative threshold also allows clearing
     * the DC coefficient. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            /* Weight by run length: early, closely spaced coefficients
             * count more against elimination. */
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* Any coefficient larger than 1 vetoes elimination. */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* Clear everything past (and optionally including) the DC. */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2002 
2003 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2004  int last_index)
2005 {
2006  int i;
2007  const int maxlevel = s->max_qcoeff;
2008  const int minlevel = s->min_qcoeff;
2009  int overflow = 0;
2010 
2011  if (s->mb_intra) {
2012  i = 1; // skip clipping of intra dc
2013  } else
2014  i = 0;
2015 
2016  for (; i <= last_index; i++) {
2017  const int j = s->intra_scantable.permutated[i];
2018  int level = block[j];
2019 
2020  if (level > maxlevel) {
2021  level = maxlevel;
2022  overflow++;
2023  } else if (level < minlevel) {
2024  level = minlevel;
2025  overflow++;
2026  }
2027 
2028  block[j] = level;
2029  }
2030 
2031  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2032  av_log(s->avctx, AV_LOG_INFO,
2033  "warning, clipping %d dct coefficients to %d..%d\n",
2034  overflow, minlevel, maxlevel);
2035 }
2036 
/**
 * Derive a perceptual weight for every pixel of an 8x8 block from the
 * local activity of its 3x3 neighbourhood (clamped at the block edges):
 * flat regions yield small weights, textured regions larger ones.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int bx, by;
    // FIXME optimize
    for (by = 0; by < 8; by++) {
        for (bx = 0; bx < 8; bx++) {
            /* Neighbourhood bounds, clipped to the 8x8 block. */
            const int y0 = FFMAX(by - 1, 0);
            const int y1 = FFMIN(8, by + 2);
            const int x0 = FFMAX(bx - 1, 0);
            const int x1 = FFMIN(8, bx + 2);
            int nx, ny;
            int sum = 0;
            int sqr = 0;
            int count = 0;

            for (ny = y0; ny < y1; ny++) {
                for (nx = x0; nx < x1; nx++) {
                    const int v = ptr[nx + ny * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count*sum(v^2) - sum(v)^2) / count: a scaled
             * standard-deviation estimate of the neighbourhood. */
            weight[bx + 8 * by] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2060 
2062  int motion_x, int motion_y,
2063  int mb_block_height,
2064  int mb_block_width,
2065  int mb_block_count)
2066 {
2067  int16_t weight[12][64];
2068  int16_t orig[12][64];
2069  const int mb_x = s->mb_x;
2070  const int mb_y = s->mb_y;
2071  int i;
2072  int skip_dct[12];
2073  int dct_offset = s->linesize * 8; // default for progressive frames
2074  int uv_dct_offset = s->uvlinesize * 8;
2075  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2076  ptrdiff_t wrap_y, wrap_c;
2077 
2078  for (i = 0; i < mb_block_count; i++)
2079  skip_dct[i] = s->skipdct;
2080 
2081  if (s->adaptive_quant) {
2082  const int last_qp = s->qscale;
2083  const int mb_xy = mb_x + mb_y * s->mb_stride;
2084 
2085  s->lambda = s->lambda_table[mb_xy];
2086  update_qscale(s);
2087 
2088  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2089  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2090  s->dquant = s->qscale - last_qp;
2091 
2092  if (s->out_format == FMT_H263) {
2093  s->dquant = av_clip(s->dquant, -2, 2);
2094 
2095  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2096  if (!s->mb_intra) {
2097  if (s->pict_type == AV_PICTURE_TYPE_B) {
2098  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2099  s->dquant = 0;
2100  }
2101  if (s->mv_type == MV_TYPE_8X8)
2102  s->dquant = 0;
2103  }
2104  }
2105  }
2106  }
2107  ff_set_qscale(s, last_qp + s->dquant);
2108  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2109  ff_set_qscale(s, s->qscale + s->dquant);
2110 
2111  wrap_y = s->linesize;
2112  wrap_c = s->uvlinesize;
2113  ptr_y = s->new_picture.f->data[0] +
2114  (mb_y * 16 * wrap_y) + mb_x * 16;
2115  ptr_cb = s->new_picture.f->data[1] +
2116  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2117  ptr_cr = s->new_picture.f->data[2] +
2118  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2119 
2120  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2121  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2122  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2123  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2124  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2125  wrap_y, wrap_y,
2126  16, 16, mb_x * 16, mb_y * 16,
2127  s->width, s->height);
2128  ptr_y = ebuf;
2129  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2130  wrap_c, wrap_c,
2131  mb_block_width, mb_block_height,
2132  mb_x * mb_block_width, mb_y * mb_block_height,
2133  cw, ch);
2134  ptr_cb = ebuf + 16 * wrap_y;
2135  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2136  wrap_c, wrap_c,
2137  mb_block_width, mb_block_height,
2138  mb_x * mb_block_width, mb_y * mb_block_height,
2139  cw, ch);
2140  ptr_cr = ebuf + 16 * wrap_y + 16;
2141  }
2142 
2143  if (s->mb_intra) {
2144  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2145  int progressive_score, interlaced_score;
2146 
2147  s->interlaced_dct = 0;
2148  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2149  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2150  NULL, wrap_y, 8) - 400;
2151 
2152  if (progressive_score > 0) {
2153  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2154  NULL, wrap_y * 2, 8) +
2155  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2156  NULL, wrap_y * 2, 8);
2157  if (progressive_score > interlaced_score) {
2158  s->interlaced_dct = 1;
2159 
2160  dct_offset = wrap_y;
2161  uv_dct_offset = wrap_c;
2162  wrap_y <<= 1;
2163  if (s->chroma_format == CHROMA_422 ||
2164  s->chroma_format == CHROMA_444)
2165  wrap_c <<= 1;
2166  }
2167  }
2168  }
2169 
2170  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2171  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2172  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2173  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2174 
2175  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2176  skip_dct[4] = 1;
2177  skip_dct[5] = 1;
2178  } else {
2179  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2180  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2181  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2182  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2183  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2184  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2185  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2186  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2187  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2188  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2189  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2190  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2191  }
2192  }
2193  } else {
2194  op_pixels_func (*op_pix)[4];
2195  qpel_mc_func (*op_qpix)[16];
2196  uint8_t *dest_y, *dest_cb, *dest_cr;
2197 
2198  dest_y = s->dest[0];
2199  dest_cb = s->dest[1];
2200  dest_cr = s->dest[2];
2201 
2202  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2203  op_pix = s->hdsp.put_pixels_tab;
2204  op_qpix = s->qdsp.put_qpel_pixels_tab;
2205  } else {
2206  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2207  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2208  }
2209 
2210  if (s->mv_dir & MV_DIR_FORWARD) {
2211  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2212  s->last_picture.f->data,
2213  op_pix, op_qpix);
2214  op_pix = s->hdsp.avg_pixels_tab;
2215  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2216  }
2217  if (s->mv_dir & MV_DIR_BACKWARD) {
2218  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2219  s->next_picture.f->data,
2220  op_pix, op_qpix);
2221  }
2222 
2223  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2224  int progressive_score, interlaced_score;
2225 
2226  s->interlaced_dct = 0;
2227  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2228  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2229  ptr_y + wrap_y * 8,
2230  wrap_y, 8) - 400;
2231 
2232  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2233  progressive_score -= 400;
2234 
2235  if (progressive_score > 0) {
2236  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2237  wrap_y * 2, 8) +
2238  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2239  ptr_y + wrap_y,
2240  wrap_y * 2, 8);
2241 
2242  if (progressive_score > interlaced_score) {
2243  s->interlaced_dct = 1;
2244 
2245  dct_offset = wrap_y;
2246  uv_dct_offset = wrap_c;
2247  wrap_y <<= 1;
2248  if (s->chroma_format == CHROMA_422)
2249  wrap_c <<= 1;
2250  }
2251  }
2252  }
2253 
2254  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2255  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2256  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2257  dest_y + dct_offset, wrap_y);
2258  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2259  dest_y + dct_offset + 8, wrap_y);
2260 
2261  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2262  skip_dct[4] = 1;
2263  skip_dct[5] = 1;
2264  } else {
2265  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2266  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2267  if (!s->chroma_y_shift) { /* 422 */
2268  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2269  dest_cb + uv_dct_offset, wrap_c);
2270  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2271  dest_cr + uv_dct_offset, wrap_c);
2272  }
2273  }
2274  /* pre quantization */
2275  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2276  2 * s->qscale * s->qscale) {
2277  // FIXME optimize
2278  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2279  skip_dct[0] = 1;
2280  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2281  skip_dct[1] = 1;
2282  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2283  wrap_y, 8) < 20 * s->qscale)
2284  skip_dct[2] = 1;
2285  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2286  wrap_y, 8) < 20 * s->qscale)
2287  skip_dct[3] = 1;
2288  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2289  skip_dct[4] = 1;
2290  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2291  skip_dct[5] = 1;
2292  if (!s->chroma_y_shift) { /* 422 */
2293  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2294  dest_cb + uv_dct_offset,
2295  wrap_c, 8) < 20 * s->qscale)
2296  skip_dct[6] = 1;
2297  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2298  dest_cr + uv_dct_offset,
2299  wrap_c, 8) < 20 * s->qscale)
2300  skip_dct[7] = 1;
2301  }
2302  }
2303  }
2304 
2305  if (s->quantizer_noise_shaping) {
2306  if (!skip_dct[0])
2307  get_visual_weight(weight[0], ptr_y , wrap_y);
2308  if (!skip_dct[1])
2309  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2310  if (!skip_dct[2])
2311  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2312  if (!skip_dct[3])
2313  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2314  if (!skip_dct[4])
2315  get_visual_weight(weight[4], ptr_cb , wrap_c);
2316  if (!skip_dct[5])
2317  get_visual_weight(weight[5], ptr_cr , wrap_c);
2318  if (!s->chroma_y_shift) { /* 422 */
2319  if (!skip_dct[6])
2320  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2321  wrap_c);
2322  if (!skip_dct[7])
2323  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2324  wrap_c);
2325  }
2326  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2327  }
2328 
2329  /* DCT & quantize */
2330  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2331  {
2332  for (i = 0; i < mb_block_count; i++) {
2333  if (!skip_dct[i]) {
2334  int overflow;
2335  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2336  // FIXME we could decide to change to quantizer instead of
2337  // clipping
2338  // JS: I don't think that would be a good idea it could lower
2339  // quality instead of improve it. Just INTRADC clipping
2340  // deserves changes in quantizer
2341  if (overflow)
2342  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2343  } else
2344  s->block_last_index[i] = -1;
2345  }
2346  if (s->quantizer_noise_shaping) {
2347  for (i = 0; i < mb_block_count; i++) {
2348  if (!skip_dct[i]) {
2349  s->block_last_index[i] =
2350  dct_quantize_refine(s, s->block[i], weight[i],
2351  orig[i], i, s->qscale);
2352  }
2353  }
2354  }
2355 
2356  if (s->luma_elim_threshold && !s->mb_intra)
2357  for (i = 0; i < 4; i++)
2358  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2359  if (s->chroma_elim_threshold && !s->mb_intra)
2360  for (i = 4; i < mb_block_count; i++)
2361  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2362 
2363  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2364  for (i = 0; i < mb_block_count; i++) {
2365  if (s->block_last_index[i] == -1)
2366  s->coded_score[i] = INT_MAX / 256;
2367  }
2368  }
2369  }
2370 
2371  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2372  s->block_last_index[4] =
2373  s->block_last_index[5] = 0;
2374  s->block[4][0] =
2375  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2376  if (!s->chroma_y_shift) { /* 422 / 444 */
2377  for (i=6; i<12; i++) {
2378  s->block_last_index[i] = 0;
2379  s->block[i][0] = s->block[4][0];
2380  }
2381  }
2382  }
2383 
2384  // non c quantize code returns incorrect block_last_index FIXME
2385  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2386  for (i = 0; i < mb_block_count; i++) {
2387  int j;
2388  if (s->block_last_index[i] > 0) {
2389  for (j = 63; j > 0; j--) {
2390  if (s->block[i][s->intra_scantable.permutated[j]])
2391  break;
2392  }
2393  s->block_last_index[i] = j;
2394  }
2395  }
2396  }
2397 
2398  /* huffman encode */
2399  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2402  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2403  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2404  break;
2405  case AV_CODEC_ID_MPEG4:
2406  if (CONFIG_MPEG4_ENCODER)
2407  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2408  break;
2409  case AV_CODEC_ID_MSMPEG4V2:
2410  case AV_CODEC_ID_MSMPEG4V3:
2411  case AV_CODEC_ID_WMV1:
2413  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2414  break;
2415  case AV_CODEC_ID_WMV2:
2416  if (CONFIG_WMV2_ENCODER)
2417  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2418  break;
2419  case AV_CODEC_ID_H261:
2420  if (CONFIG_H261_ENCODER)
2421  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2422  break;
2423  case AV_CODEC_ID_H263:
2424  case AV_CODEC_ID_H263P:
2425  case AV_CODEC_ID_FLV1:
2426  case AV_CODEC_ID_RV10:
2427  case AV_CODEC_ID_RV20:
2428  if (CONFIG_H263_ENCODER)
2429  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2430  break;
2431 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2432  case AV_CODEC_ID_MJPEG:
2433  case AV_CODEC_ID_AMV:
2434  ff_mjpeg_encode_mb(s, s->block);
2435  break;
2436 #endif
2437  case AV_CODEC_ID_SPEEDHQ:
2438  if (CONFIG_SPEEDHQ_ENCODER)
2439  ff_speedhq_encode_mb(s, s->block);
2440  break;
2441  default:
2442  av_assert1(0);
2443  }
2444 }
2445 
2446 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2447 {
2448  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2449  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2450  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2451 }
2452 
    int i;

    /* Snapshot the per-macroblock encoder state that a trial encode can
     * modify, so encode_mb_hq() can restore it before trying another
     * candidate mode. */
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;   // bit accounting for the trial starts from zero

    d->mb_skipped= 0;  // the trial has not (yet) skipped this MB
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    d->esc3_level_length= s->esc3_level_length;
2480 
    int i;

    /* Transfer the macroblock-level encoder state produced by a trial
     * encode from s to d: motion vectors, bit counters, mode decisions,
     * PutBitContexts and block data pointers. */
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    /* mode decision results of the trial */
    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    /* shallow copy: d->block aliases the coefficient buffer used by the trial */
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    d->esc3_level_length= s->esc3_level_length;
2519 
/**
 * Trial-encode the current macroblock with the prediction mode prepared by
 * the caller and keep the result if its score beats *dmin.
 *
 * Candidate bitstreams/coefficients are double-buffered via *next_block:
 * on a new best candidate, *next_block is flipped so the winning bits are
 * not overwritten by the next trial.  With FF_MB_DECISION_RD the MB is also
 * reconstructed and the score combines rate (bits * lambda2) with SSE
 * distortion.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    /* select the double-buffer slot for this trial */
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* Reconstruct slot 1 into the scratchpad so the reconstruction of
         * the current best candidate in s->dest is preserved; restored
         * below after encoding. */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* rate in bits for this candidate */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        ff_mpv_reconstruct_mb(s, s->block);

        /* RD cost = bits * lambda2 + SSE distortion (fixed-point lambda scale) */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        /* keep this candidate: flip the double buffer so the next trial
         * writes into the other slot */
        *next_block^=1;

    }
}
2570 
2571 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2572  const uint32_t *sq = ff_square_tab + 256;
2573  int acc=0;
2574  int x,y;
2575 
2576  if(w==16 && h==16)
2577  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2578  else if(w==8 && h==8)
2579  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2580 
2581  for(y=0; y<h; y++){
2582  for(x=0; x<w; x++){
2583  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2584  }
2585  }
2586 
2587  av_assert2(acc>=0);
2588 
2589  return acc;
2590 }
2591 
2592 static int sse_mb(MpegEncContext *s){
2593  int w= 16;
2594  int h= 16;
2595 
2596  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2597  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2598 
2599  if(w==16 && h==16)
2600  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2601  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2602  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2603  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2604  }else{
2605  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2606  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2607  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2608  }
2609  else
2610  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2611  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2612  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2613 }
2614 
    MpegEncContext *s= *(void**)arg;


    /* Motion-estimation pre-pass over this slice context's MB rows,
     * iterating in reverse order (bottom-right to top-left). */
    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0; // only the first iterated row is flagged
    }

    s->me.pre_pass=0;

    return 0;
2633 
    MpegEncContext *s= *(void**)arg;

    /* Per-slice motion estimation worker: estimates B- or P-frame motion
     * for every macroblock in this context's row range. */
    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance luma block indices by one MB (2 blocks per row of the MB) */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
2658 
2659 static int mb_var_thread(AVCodecContext *c, void *arg){
2660  MpegEncContext *s= *(void**)arg;
2661  int mb_x, mb_y;
2662 
2663  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2664  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2665  int xx = mb_x * 16;
2666  int yy = mb_y * 16;
2667  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2668  int varc;
2669  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2670 
2671  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2672  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2673 
2674  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2675  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2676  s->me.mb_var_sum_temp += varc;
2677  }
2678  }
2679  return 0;
2680 }
2681 
    /* Terminate the current slice: codec-specific stuffing first, then
     * byte-align and flush the PutBitContext. */
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
        }

        ff_mpeg4_stuffing(&s->pb);
    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
               s->out_format == FMT_MJPEG) {
    } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
    }

    flush_put_bits(&s->pb);

    /* in pass 1, account the stuffing/alignment bits as misc overhead */
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
2701 
{
    /* Fill in one 12-byte macroblock-info record at the end of the info
     * buffer: bit offset (4), qscale (1), GOB number (1), MB address (2)
     * and the four MV predictor bytes hmv1/vmv1/hmv2/vmv2 (4). */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2721 
2722 static void update_mb_info(MpegEncContext *s, int startcode)
2723 {
2724  if (!s->mb_info)
2725  return;
2726  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2727  s->mb_info_size += 12;
2728  s->prev_mb_info = s->last_mb_info;
2729  }
2730  if (startcode) {
2731  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2732  /* This might have incremented mb_info_size above, and we return without
2733  * actually writing any info into that slot yet. But in that case,
2734  * this will be called again at the start of the after writing the
2735  * start code, actually writing the mb info. */
2736  return;
2737  }
2738 
2739  s->last_mb_info = put_bytes_count(&s->pb, 0);
2740  if (!s->mb_info_size)
2741  s->mb_info_size += 12;
2742  write_mb_info(s);
2743 }
2744 
/**
 * Grow the bitstream output buffer when fewer than @p threshold bytes of
 * space remain in s->pb.
 *
 * Reallocation only happens when there is a single slice context and the
 * PutBitContext writes into the AVCodecContext-internal byte buffer; the
 * raw pointers into the old buffer (ptr_lastgob, vbv_delay_ptr) are rebased
 * onto the new one.
 *
 * @param s             encoder context whose s->pb may be reallocated
 * @param threshold     minimum number of free bytes required
 * @param size_increase number of bytes to grow the buffer by
 * @return 0 on success, AVERROR(ENOMEM) if allocation fails or the grown
 *         size would be unreasonably large, AVERROR(EINVAL) if fewer than
 *         @p threshold bytes remain even after (possible) reallocation
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (put_bytes_left(&s->pb, 0) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        // Offsets of raw pointers into the old buffer, used to rebase them below.
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        // Refuse to grow beyond INT_MAX/8 (guards the int-based size below).
        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        // Preserve already-written data, switch contexts and rebase pointers.
        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (put_bytes_left(&s->pb, 0) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2780 
2781 static int encode_thread(AVCodecContext *c, void *arg){
2782  MpegEncContext *s= *(void**)arg;
2783  int mb_x, mb_y, mb_y_order;
2784  int chr_h= 16>>s->chroma_y_shift;
2785  int i, j;
2786  MpegEncContext best_s = { 0 }, backup_s;
2787  uint8_t bit_buf[2][MAX_MB_BYTES];
2788  uint8_t bit_buf2[2][MAX_MB_BYTES];
2789  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2790  PutBitContext pb[2], pb2[2], tex_pb[2];
2791 
2792  for(i=0; i<2; i++){
2793  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2794  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2795  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2796  }
2797 
2798  s->last_bits= put_bits_count(&s->pb);
2799  s->mv_bits=0;
2800  s->misc_bits=0;
2801  s->i_tex_bits=0;
2802  s->p_tex_bits=0;
2803  s->i_count=0;
2804  s->f_count=0;
2805  s->b_count=0;
2806  s->skip_count=0;
2807 
2808  for(i=0; i<3; i++){
2809  /* init last dc values */
2810  /* note: quant matrix value (8) is implied here */
2811  s->last_dc[i] = 128 << s->intra_dc_precision;
2812 
2813  s->current_picture.encoding_error[i] = 0;
2814  }
2815  if(s->codec_id==AV_CODEC_ID_AMV){
2816  s->last_dc[0] = 128*8/13;
2817  s->last_dc[1] = 128*8/14;
2818  s->last_dc[2] = 128*8/14;
2819  }
2820  s->mb_skip_run = 0;
2821  memset(s->last_mv, 0, sizeof(s->last_mv));
2822 
2823  s->last_mv_dir = 0;
2824 
2825  switch(s->codec_id){
2826  case AV_CODEC_ID_H263:
2827  case AV_CODEC_ID_H263P:
2828  case AV_CODEC_ID_FLV1:
2829  if (CONFIG_H263_ENCODER)
2830  s->gob_index = H263_GOB_HEIGHT(s->height);
2831  break;
2832  case AV_CODEC_ID_MPEG4:
2833  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2835  break;
2836  }
2837 
2838  s->resync_mb_x=0;
2839  s->resync_mb_y=0;
2840  s->first_slice_line = 1;
2841  s->ptr_lastgob = s->pb.buf;
2842  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2843  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2844  int first_in_slice;
2845  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2846  if (first_in_slice && mb_y_order != s->start_mb_y)
2848  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2849  } else {
2850  mb_y = mb_y_order;
2851  }
2852  s->mb_x=0;
2853  s->mb_y= mb_y;
2854 
2855  ff_set_qscale(s, s->qscale);
2857 
2858  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2859  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2860  int mb_type= s->mb_type[xy];
2861 // int d;
2862  int dmin= INT_MAX;
2863  int dir;
2864  int size_increase = s->avctx->internal->byte_buffer_size/4
2865  + s->mb_width*MAX_MB_BYTES;
2866 
2868  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2869  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2870  return -1;
2871  }
2872  if(s->data_partitioning){
2873  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2874  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2875  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2876  return -1;
2877  }
2878  }
2879 
2880  s->mb_x = mb_x;
2881  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2883 
2884  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2886  xy= s->mb_y*s->mb_stride + s->mb_x;
2887  mb_type= s->mb_type[xy];
2888  }
2889 
2890  /* write gob / video packet header */
2891  if(s->rtp_mode){
2892  int current_packet_size, is_gob_start;
2893 
2894  current_packet_size = put_bytes_count(&s->pb, 1)
2895  - (s->ptr_lastgob - s->pb.buf);
2896 
2897  is_gob_start = s->rtp_payload_size &&
2898  current_packet_size >= s->rtp_payload_size &&
2899  mb_y + mb_x > 0;
2900 
2901  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2902 
2903  switch(s->codec_id){
2904  case AV_CODEC_ID_H263:
2905  case AV_CODEC_ID_H263P:
2906  if(!s->h263_slice_structured)
2907  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2908  break;
2910  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2912  if(s->mb_skip_run) is_gob_start=0;
2913  break;
2914  case AV_CODEC_ID_MJPEG:
2915  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2916  break;
2917  }
2918 
2919  if(is_gob_start){
2920  if(s->start_mb_y != mb_y || mb_x!=0){
2921  write_slice_end(s);
2922 
2923  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2925  }
2926  }
2927 
2928  av_assert2((put_bits_count(&s->pb)&7) == 0);
2929  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2930 
2931  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2932  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
2933  int d = 100 / s->error_rate;
2934  if(r % d == 0){
2935  current_packet_size=0;
2936  s->pb.buf_ptr= s->ptr_lastgob;
2937  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2938  }
2939  }
2940 
2941  update_mb_info(s, 1);
2942 
2943  switch(s->codec_id){
2944  case AV_CODEC_ID_MPEG4:
2945  if (CONFIG_MPEG4_ENCODER) {
2948  }
2949  break;
2952  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2955  }
2956  break;
2957  case AV_CODEC_ID_H263:
2958  case AV_CODEC_ID_H263P:
2959  if (CONFIG_H263_ENCODER)
2961  break;
2962  }
2963 
2964  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2965  int bits= put_bits_count(&s->pb);
2966  s->misc_bits+= bits - s->last_bits;
2967  s->last_bits= bits;
2968  }
2969 
2970  s->ptr_lastgob += current_packet_size;
2971  s->first_slice_line=1;
2972  s->resync_mb_x=mb_x;
2973  s->resync_mb_y=mb_y;
2974  }
2975  }
2976 
2977  if( (s->resync_mb_x == s->mb_x)
2978  && s->resync_mb_y+1 == s->mb_y){
2979  s->first_slice_line=0;
2980  }
2981 
2982  s->mb_skipped=0;
2983  s->dquant=0; //only for QP_RD
2984 
2985  update_mb_info(s, 0);
2986 
2987  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2988  int next_block=0;
2989  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2990 
2991  copy_context_before_encode(&backup_s, s, -1);
2992  backup_s.pb= s->pb;
2993  best_s.data_partitioning= s->data_partitioning;
2994  best_s.partitioned_frame= s->partitioned_frame;
2995  if(s->data_partitioning){
2996  backup_s.pb2= s->pb2;
2997  backup_s.tex_pb= s->tex_pb;
2998  }
2999 
3001  s->mv_dir = MV_DIR_FORWARD;
3002  s->mv_type = MV_TYPE_16X16;
3003  s->mb_intra= 0;
3004  s->mv[0][0][0] = s->p_mv_table[xy][0];
3005  s->mv[0][0][1] = s->p_mv_table[xy][1];
3006  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3007  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3008  }
3010  s->mv_dir = MV_DIR_FORWARD;
3011  s->mv_type = MV_TYPE_FIELD;
3012  s->mb_intra= 0;
3013  for(i=0; i<2; i++){
3014  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3015  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3016  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3017  }
3018  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3019  &dmin, &next_block, 0, 0);
3020  }
3022  s->mv_dir = MV_DIR_FORWARD;
3023  s->mv_type = MV_TYPE_16X16;
3024  s->mb_intra= 0;
3025  s->mv[0][0][0] = 0;
3026  s->mv[0][0][1] = 0;
3027  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3028  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3029  }
3031  s->mv_dir = MV_DIR_FORWARD;
3032  s->mv_type = MV_TYPE_8X8;
3033  s->mb_intra= 0;
3034  for(i=0; i<4; i++){
3035  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3036  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3037  }
3038  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3039  &dmin, &next_block, 0, 0);
3040  }
3042  s->mv_dir = MV_DIR_FORWARD;
3043  s->mv_type = MV_TYPE_16X16;
3044  s->mb_intra= 0;
3045  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3046  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3047  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3048  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3049  }
3051  s->mv_dir = MV_DIR_BACKWARD;
3052  s->mv_type = MV_TYPE_16X16;
3053  s->mb_intra= 0;
3054  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3055  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3056  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3057  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3058  }
3060  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3061  s->mv_type = MV_TYPE_16X16;
3062  s->mb_intra= 0;
3063  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3064  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3065  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3066  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3067  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3068  &dmin, &next_block, 0, 0);
3069  }
3071  s->mv_dir = MV_DIR_FORWARD;
3072  s->mv_type = MV_TYPE_FIELD;
3073  s->mb_intra= 0;
3074  for(i=0; i<2; i++){
3075  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3076  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3077  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3078  }
3079  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3080  &dmin, &next_block, 0, 0);
3081  }
3083  s->mv_dir = MV_DIR_BACKWARD;
3084  s->mv_type = MV_TYPE_FIELD;
3085  s->mb_intra= 0;
3086  for(i=0; i<2; i++){
3087  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3088  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3089  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3090  }
3091  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3092  &dmin, &next_block, 0, 0);
3093  }
3095  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3096  s->mv_type = MV_TYPE_FIELD;
3097  s->mb_intra= 0;
3098  for(dir=0; dir<2; dir++){
3099  for(i=0; i<2; i++){
3100  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3101  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3102  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3103  }
3104  }
3105  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3106  &dmin, &next_block, 0, 0);
3107  }
3109  s->mv_dir = 0;
3110  s->mv_type = MV_TYPE_16X16;
3111  s->mb_intra= 1;
3112  s->mv[0][0][0] = 0;
3113  s->mv[0][0][1] = 0;
3114  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3115  &dmin, &next_block, 0, 0);
3116  if(s->h263_pred || s->h263_aic){
3117  if(best_s.mb_intra)
3118  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3119  else
3120  ff_clean_intra_table_entries(s); //old mode?
3121  }
3122  }
3123 
3124  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3125  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3126  const int last_qp= backup_s.qscale;
3127  int qpi, qp, dc[6];
3128  int16_t ac[6][16];
3129  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3130  static const int dquant_tab[4]={-1,1,-2,2};
3131  int storecoefs = s->mb_intra && s->dc_val[0];
3132 
3133  av_assert2(backup_s.dquant == 0);
3134 
3135  //FIXME intra
3136  s->mv_dir= best_s.mv_dir;
3137  s->mv_type = MV_TYPE_16X16;
3138  s->mb_intra= best_s.mb_intra;
3139  s->mv[0][0][0] = best_s.mv[0][0][0];
3140  s->mv[0][0][1] = best_s.mv[0][0][1];
3141  s->mv[1][0][0] = best_s.mv[1][0][0];
3142  s->mv[1][0][1] = best_s.mv[1][0][1];
3143 
3144  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3145  for(; qpi<4; qpi++){
3146  int dquant= dquant_tab[qpi];
3147  qp= last_qp + dquant;
3148  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3149  continue;
3150  backup_s.dquant= dquant;
3151  if(storecoefs){
3152  for(i=0; i<6; i++){
3153  dc[i]= s->dc_val[0][ s->block_index[i] ];
3154  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3155  }
3156  }
3157 
3158  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3159  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3160  if(best_s.qscale != qp){
3161  if(storecoefs){
3162  for(i=0; i<6; i++){
3163  s->dc_val[0][ s->block_index[i] ]= dc[i];
3164  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3165  }
3166  }
3167  }
3168  }
3169  }
3170  }
3171  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3172  int mx= s->b_direct_mv_table[xy][0];
3173  int my= s->b_direct_mv_table[xy][1];
3174 
3175  backup_s.dquant = 0;
3176  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3177  s->mb_intra= 0;
3178  ff_mpeg4_set_direct_mv(s, mx, my);
3179  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3180  &dmin, &next_block, mx, my);
3181  }
3182  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3183  backup_s.dquant = 0;
3184  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3185  s->mb_intra= 0;
3186  ff_mpeg4_set_direct_mv(s, 0, 0);
3187  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3188  &dmin, &next_block, 0, 0);
3189  }
3190  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3191  int coded=0;
3192  for(i=0; i<6; i++)
3193  coded |= s->block_last_index[i];
3194  if(coded){
3195  int mx,my;
3196  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3197  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3198  mx=my=0; //FIXME find the one we actually used
3199  ff_mpeg4_set_direct_mv(s, mx, my);
3200  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3201  mx= s->mv[1][0][0];
3202  my= s->mv[1][0][1];
3203  }else{
3204  mx= s->mv[0][0][0];
3205  my= s->mv[0][0][1];
3206  }
3207 
3208  s->mv_dir= best_s.mv_dir;
3209  s->mv_type = best_s.mv_type;
3210  s->mb_intra= 0;
3211 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3212  s->mv[0][0][1] = best_s.mv[0][0][1];
3213  s->mv[1][0][0] = best_s.mv[1][0][0];
3214  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3215  backup_s.dquant= 0;
3216  s->skipdct=1;
3217  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3218  &dmin, &next_block, mx, my);
3219  s->skipdct=0;
3220  }
3221  }
3222 
3223  s->current_picture.qscale_table[xy] = best_s.qscale;
3224 
3225  copy_context_after_encode(s, &best_s, -1);
3226 
3227  pb_bits_count= put_bits_count(&s->pb);
3228  flush_put_bits(&s->pb);
3229  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3230  s->pb= backup_s.pb;
3231 
3232  if(s->data_partitioning){
3233  pb2_bits_count= put_bits_count(&s->pb2);
3234  flush_put_bits(&s->pb2);
3235  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3236  s->pb2= backup_s.pb2;
3237 
3238  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3239  flush_put_bits(&s->tex_pb);
3240  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3241  s->tex_pb= backup_s.tex_pb;
3242  }
3243  s->last_bits= put_bits_count(&s->pb);
3244 
3245  if (CONFIG_H263_ENCODER &&
3246  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3248 
3249  if(next_block==0){ //FIXME 16 vs linesize16
3250  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3251  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3252  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3253  }
3254 
3255  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3256  ff_mpv_reconstruct_mb(s, s->block);
3257  } else {
3258  int motion_x = 0, motion_y = 0;
3259  s->mv_type=MV_TYPE_16X16;
3260  // only one MB-Type possible
3261 
3262  switch(mb_type){
3264  s->mv_dir = 0;
3265  s->mb_intra= 1;
3266  motion_x= s->mv[0][0][0] = 0;
3267  motion_y= s->mv[0][0][1] = 0;
3268  break;
3270  s->mv_dir = MV_DIR_FORWARD;
3271  s->mb_intra= 0;
3272  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3273  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3274  break;
3276  s->mv_dir = MV_DIR_FORWARD;
3277  s->mv_type = MV_TYPE_FIELD;
3278  s->mb_intra= 0;
3279  for(i=0; i<2; i++){
3280  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3281  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3282  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3283  }
3284  break;
3286  s->mv_dir = MV_DIR_FORWARD;
3287  s->mv_type = MV_TYPE_8X8;
3288  s->mb_intra= 0;
3289  for(i=0; i<4; i++){
3290  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3291  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3292  }
3293  break;
3295  if (CONFIG_MPEG4_ENCODER) {
3297  s->mb_intra= 0;
3298  motion_x=s->b_direct_mv_table[xy][0];
3299  motion_y=s->b_direct_mv_table[xy][1];
3300  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3301  }
3302  break;
3304  if (CONFIG_MPEG4_ENCODER) {
3306  s->mb_intra= 0;
3307  ff_mpeg4_set_direct_mv(s, 0, 0);
3308  }
3309  break;
3311  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3312  s->mb_intra= 0;
3313  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3314  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3315  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3316  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3317  break;
3319  s->mv_dir = MV_DIR_BACKWARD;
3320  s->mb_intra= 0;
3321  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3322  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3323  break;
3325  s->mv_dir = MV_DIR_FORWARD;
3326  s->mb_intra= 0;
3327  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3328  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3329  break;
3331  s->mv_dir = MV_DIR_FORWARD;
3332  s->mv_type = MV_TYPE_FIELD;
3333  s->mb_intra= 0;
3334  for(i=0; i<2; i++){
3335  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3336  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3337  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3338  }
3339  break;
3341  s->mv_dir = MV_DIR_BACKWARD;
3342  s->mv_type = MV_TYPE_FIELD;
3343  s->mb_intra= 0;
3344  for(i=0; i<2; i++){
3345  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3346  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3347  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3348  }
3349  break;
3351  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3352  s->mv_type = MV_TYPE_FIELD;
3353  s->mb_intra= 0;
3354  for(dir=0; dir<2; dir++){
3355  for(i=0; i<2; i++){
3356  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3357  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3358  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3359  }
3360  }
3361  break;
3362  default:
3363  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3364  }
3365 
3366  encode_mb(s, motion_x, motion_y);
3367 
3368  // RAL: Update last macroblock type
3369  s->last_mv_dir = s->mv_dir;
3370 
3371  if (CONFIG_H263_ENCODER &&
3372  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3374 
3375  ff_mpv_reconstruct_mb(s, s->block);
3376  }
3377 
3378  /* clean the MV table in IPS frames for direct mode in B-frames */
3379  if(s->mb_intra /* && I,P,S_TYPE */){
3380  s->p_mv_table[xy][0]=0;
3381  s->p_mv_table[xy][1]=0;
3382  }
3383 
3384  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3385  int w= 16;
3386  int h= 16;
3387 
3388  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3389  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3390 
3391  s->current_picture.encoding_error[0] += sse(
3392  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3393  s->dest[0], w, h, s->linesize);
3394  s->current_picture.encoding_error[1] += sse(
3395  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3396  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3397  s->current_picture.encoding_error[2] += sse(
3398  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3399  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3400  }
3401  if(s->loop_filter){
3402  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3404  }
3405  ff_dlog(s->avctx, "MB %d %d bits\n",
3406  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3407  }
3408  }
3409 
3410  //not beautiful here but we must write it before flushing so it has to be here
3411  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3413 
3414  write_slice_end(s);
3415 
3416  return 0;
3417 }
3418 
3419 #define MERGE(field) dst->field += src->field; src->field=0
3421  MERGE(me.scene_change_score);
3422  MERGE(me.mc_mb_var_sum_temp);
3423  MERGE(me.mb_var_sum_temp);
3424 }
3425 
3427  int i;
3428 
3429  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3430  MERGE(dct_count[1]);
3431  MERGE(mv_bits);
3432  MERGE(i_tex_bits);
3433  MERGE(p_tex_bits);
3434  MERGE(i_count);
3435  MERGE(f_count);
3436  MERGE(b_count);
3437  MERGE(skip_count);
3438  MERGE(misc_bits);
3439  MERGE(er.error_count);
3444 
3445  if (dst->noise_reduction){
3446  for(i=0; i<64; i++){
3447  MERGE(dct_error_sum[0][i]);
3448  MERGE(dct_error_sum[1][i]);
3449  }
3450  }
3451 
3452  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3453  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3454  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3455  flush_put_bits(&dst->pb);
3456 }
3457 
3458 static int estimate_qp(MpegEncContext *s, int dry_run){
3459  if (s->next_lambda){
3460  s->current_picture_ptr->f->quality =
3461  s->current_picture.f->quality = s->next_lambda;
3462  if(!dry_run) s->next_lambda= 0;
3463  } else if (!s->fixed_qscale) {
3464  int quality = ff_rate_estimate_qscale(s, dry_run);
3465  s->current_picture_ptr->f->quality =
3466  s->current_picture.f->quality = quality;
3467  if (s->current_picture.f->quality < 0)
3468  return -1;
3469  }
3470 
3471  if(s->adaptive_quant){
3472  switch(s->codec_id){
3473  case AV_CODEC_ID_MPEG4:
3474  if (CONFIG_MPEG4_ENCODER)
3476  break;
3477  case AV_CODEC_ID_H263:
3478  case AV_CODEC_ID_H263P:
3479  case AV_CODEC_ID_FLV1:
3480  if (CONFIG_H263_ENCODER)
3482  break;
3483  default:
3485  }
3486 
3487  s->lambda= s->lambda_table[0];
3488  //FIXME broken
3489  }else
3490  s->lambda = s->current_picture.f->quality;
3491  update_qscale(s);
3492  return 0;
3493 }
3494 
3495 /* must be called before writing the header */
3497  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3498  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3499 
3500  if(s->pict_type==AV_PICTURE_TYPE_B){
3501  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3502  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3503  }else{
3504  s->pp_time= s->time - s->last_non_b_time;
3505  s->last_non_b_time= s->time;
3506  av_assert1(s->picture_number==0 || s->pp_time > 0);
3507  }
3508 }
3509 
3511 {
3512  int i, ret;
3513  int bits;
3514  int context_count = s->slice_context_count;
3515 
3516  s->picture_number = picture_number;
3517 
3518  /* Reset the average MB variance */
3519  s->me.mb_var_sum_temp =
3520  s->me.mc_mb_var_sum_temp = 0;
3521 
3522  /* we need to initialize some time vars before we can encode B-frames */
3523  // RAL: Condition added for MPEG1VIDEO
3524  if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
3526  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3528 
3529  s->me.scene_change_score=0;
3530 
3531 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3532 
3533  if(s->pict_type==AV_PICTURE_TYPE_I){
3534  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3535  else s->no_rounding=0;
3536  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3537  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3538  s->no_rounding ^= 1;
3539  }
3540 
3541  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3542  if (estimate_qp(s,1) < 0)
3543  return -1;
3545  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3546  if(s->pict_type==AV_PICTURE_TYPE_B)
3547  s->lambda= s->last_lambda_for[s->pict_type];
3548  else
3549  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3550  update_qscale(s);
3551  }
3552 
3553  if (s->out_format != FMT_MJPEG) {
3554  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3555  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3556  s->q_chroma_intra_matrix = s->q_intra_matrix;
3557  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3558  }
3559 
3560  s->mb_intra=0; //for the rate distortion & bit compare functions
3561  for(i=1; i<context_count; i++){
3562  ret = ff_update_duplicate_context(s->thread_context[i], s);
3563  if (ret < 0)
3564  return ret;
3565  }
3566 
3567  if(ff_init_me(s)<0)
3568  return -1;
3569 
3570  /* Estimate motion for every MB */
3571  if(s->pict_type != AV_PICTURE_TYPE_I){
3572  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3573  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3574  if (s->pict_type != AV_PICTURE_TYPE_B) {
3575  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3576  s->me_pre == 2) {
3577  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3578  }
3579  }
3580 
3581  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3582  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3583  /* I-Frame */
3584  for(i=0; i<s->mb_stride*s->mb_height; i++)
3585  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3586 
3587  if(!s->fixed_qscale){
3588  /* finding spatial complexity for I-frame rate control */
3589  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3590  }
3591  }
3592  for(i=1; i<context_count; i++){
3593  merge_context_after_me(s, s->thread_context[i]);
3594  }
3595  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3596  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3597  emms_c();
3598 
3599  if (s->me.scene_change_score > s->scenechange_threshold &&
3600  s->pict_type == AV_PICTURE_TYPE_P) {
3601  s->pict_type= AV_PICTURE_TYPE_I;
3602  for(i=0; i<s->mb_stride*s->mb_height; i++)
3603  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3604  if(s->msmpeg4_version >= 3)
3605  s->no_rounding=1;
3606  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3607  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3608  }
3609 
3610  if(!s->umvplus){
3611  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3612  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3613 
3614  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3615  int a,b;
3616  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3617  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3618  s->f_code= FFMAX3(s->f_code, a, b);
3619  }
3620 
3622  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3623  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3624  int j;
3625  for(i=0; i<2; i++){
3626  for(j=0; j<2; j++)
3627  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3628  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3629  }
3630  }
3631  }
3632 
3633  if(s->pict_type==AV_PICTURE_TYPE_B){
3634  int a, b;
3635 
3636  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3637  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3638  s->f_code = FFMAX(a, b);
3639 
3640  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3641  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3642  s->b_code = FFMAX(a, b);
3643 
3644  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3645  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3646  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3647  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3648  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3649  int dir, j;
3650  for(dir=0; dir<2; dir++){
3651  for(i=0; i<2; i++){
3652  for(j=0; j<2; j++){
3655  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3656  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3657  }
3658  }
3659  }
3660  }
3661  }
3662  }
3663 
3664  if (estimate_qp(s, 0) < 0)
3665  return -1;
3666 
3667  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3668  s->pict_type == AV_PICTURE_TYPE_I &&
3669  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3670  s->qscale= 3; //reduce clipping problems
3671 
3672  if (s->out_format == FMT_MJPEG) {
3673  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3674  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3675 
3676  if (s->avctx->intra_matrix) {
3677  chroma_matrix =
3678  luma_matrix = s->avctx->intra_matrix;
3679  }
3680  if (s->avctx->chroma_intra_matrix)
3681  chroma_matrix = s->avctx->chroma_intra_matrix;
3682 
3683  /* for mjpeg, we do include qscale in the matrix */
3684  for(i=1;i<64;i++){
3685  int j = s->idsp.idct_permutation[i];
3686 
3687  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3688  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3689  }
3690  s->y_dc_scale_table=
3691  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3692  s->chroma_intra_matrix[0] =
3693  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3694  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3695  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3696  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3697  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3698  s->qscale= 8;
3699 
3700  if (s->codec_id == AV_CODEC_ID_AMV) {
3701  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3702  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3703  for (int i = 1; i < 64; i++) {
3704  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3705 
3706  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3707  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3708  }
3709  s->y_dc_scale_table = y;
3710  s->c_dc_scale_table = c;
3711  s->intra_matrix[0] = 13;
3712  s->chroma_intra_matrix[0] = 14;
3713  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3714  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3715  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3716  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3717  s->qscale = 8;
3718  }
3719  } else if (s->out_format == FMT_SPEEDHQ) {
3720  s->y_dc_scale_table=
3721  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3722  }
3723 
3724  //FIXME var duplication
3725  s->current_picture_ptr->f->key_frame =
3726  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3727  s->current_picture_ptr->f->pict_type =
3728  s->current_picture.f->pict_type = s->pict_type;
3729 
3730  if (s->current_picture.f->key_frame)
3731  s->picture_in_gop_number=0;
3732 
3733  s->mb_x = s->mb_y = 0;
3734  s->last_bits= put_bits_count(&s->pb);
3735  switch(s->out_format) {
3736 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3737  case FMT_MJPEG:
3738  /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
3739  if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
3740  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3741  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3742  break;
3743 #endif
3744  case FMT_SPEEDHQ:
3745  if (CONFIG_SPEEDHQ_ENCODER)
3747  break;
3748  case FMT_H261:
3749  if (CONFIG_H261_ENCODER)
3751  break;
3752  case FMT_H263:
3753  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3755  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3757  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3759  if (ret < 0)
3760  return ret;
3761  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3763  if (ret < 0)
3764  return ret;
3765  }
3766  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3768  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3770  else if (CONFIG_H263_ENCODER)
3772  break;
3773  case FMT_MPEG1:
3774  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3776  break;
3777  default:
3778  av_assert0(0);
3779  }
3780  bits= put_bits_count(&s->pb);
3781  s->header_bits= bits - s->last_bits;
3782 
3783  for(i=1; i<context_count; i++){
3784  update_duplicate_context_after_me(s->thread_context[i], s);
3785  }
3786  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3787  for(i=1; i<context_count; i++){
3788  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3789  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3790  merge_context_after_encode(s, s->thread_context[i]);
3791  }
3792  emms_c();
3793  return 0;
3794 }
3795 
3796 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3797  const int intra= s->mb_intra;
3798  int i;
3799 
3800  s->dct_count[intra]++;
3801 
3802  for(i=0; i<64; i++){
3803  int level= block[i];
3804 
3805  if(level){
3806  if(level>0){
3807  s->dct_error_sum[intra][i] += level;
3808  level -= s->dct_offset[intra][i];
3809  if(level<0) level=0;
3810  }else{
3811  s->dct_error_sum[intra][i] -= level;
3812  level += s->dct_offset[intra][i];
3813  if(level>0) level=0;
3814  }
3815  block[i]= level;
3816  }
3817  }
3818 }
3819 
/* NOTE(review): the first line of this function's signature
 * ("static int dct_quantize_trellis_c(MpegEncContext *s," per upstream)
 * was lost in extraction; the parameter continuation below is kept as-is.
 *
 * Rate-distortion optimized quantization of one 8x8 block: a dynamic
 * program over scan positions (survivor[]/score_tab[] hold the best
 * partial paths) picks run/level pairs minimizing distortion + lambda*bits.
 * Returns the index of the last nonzero coefficient (or -1/0-style values
 * matching the other dct_quantize implementations). */
                               int16_t *block, int n,
                               int qscale, int *overflow){
    const int *qmat;
    const uint16_t *matrix;
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];     /* up to two candidate levels per scan position */
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
    int mpeg2_qscale;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 mpeg2_qscale = qscale << 1;

    /* select scan order, quant matrices and VLC length tables per block type */
    if (s->mb_intra) {
        int q;
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
        if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        matrix = s->inter_matrix;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* scan backwards for the last coefficient that quantizes to nonzero */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build the candidate level list (level and level-1) per position */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* below threshold: the only candidate is +-1 with the sign of level */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    /* dynamic program over scan positions: survivor[] holds viable path ends */
    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value the decoder would see */
            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{ // MPEG-1
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                if(s->mb_intra){
                        unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }else{
                        unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the VLC table: try extending every survivor path */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    /* also consider ending the block here (uses the "last" VLC table) */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* level out of VLC range: must be coded with an escape */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
        /* prune survivors that can no longer be on an optimal path */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    /* formats without an explicit "last" VLC: choose the best stopping point */
    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if (i)
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    /* special case: only the first coefficient survives — re-decide it alone */
    if(last_non_zero == 0 && start_i == 0){
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    unquant_coeff= (alevel*qmul + qadd)>>3;
            } else{ // MPEG-1
                    unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }

    /* backtrack the winning path and write the chosen levels into block[] */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
4132 
4133 static int16_t basis[64][64];
4134 
4135 static void build_basis(uint8_t *perm){
4136  int i, j, x, y;
4137  emms_c();
4138  for(i=0; i<8; i++){
4139  for(j=0; j<8; j++){
4140  for(y=0; y<8; y++){
4141  for(x=0; x<8; x++){
4142  double s= 0.25*(1<<BASIS_SHIFT);
4143  int index= 8*i + j;
4144  int perm_index= perm[index];
4145  if(i==0) s*= sqrt(0.5);
4146  if(j==0) s*= sqrt(0.5);
4147  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4148  }
4149  }
4150  }
4151  }
4152 }
4153 
/**
 * Rate-distortion refinement of an already-quantized block (used by the
 * quantizer_noise_shaping modes): repeatedly try changing single quantized
 * coefficients by +-1 and keep whichever change gives the best combined
 * score of VLC bit-length difference (scaled by lambda) and weighted
 * reconstruction error, until no candidate improves the score.
 *
 * @param block  quantized coefficients, updated in place
 * @param weight per-coefficient error weights; remapped in place to 16..63
 * @param orig   reference samples the error is measured against
 *               (assumed to fit RECON_SHIFT fixed-point precision — see rem[])
 * @param n      block index (n < 4 luma, otherwise chroma)
 * @param qscale quantizer scale
 * @return index of the last non-zero coefficient in scan order after refinement
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                      // current weighted reconstruction error, RECON_SHIFT fixed point
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                      // zero-run lengths preceding each non-zero coefficient
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;                     // VLC bit lengths for non-last coefficients
    uint8_t * last_length;                // VLC bit lengths for the last coefficient of a block
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    // The basis table is filled lazily on first use.
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    // Seed the error with the (rounded) DC reconstruction minus the reference.
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // Remap the weights into 16..63 and accumulate their energy; the sum
    // rescales lambda so the distortion and rate terms stay comparable.
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // Build the initial run-length table and add the dequantized AC
    // coefficients into the error, so rem[] reflects the current quantization.
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // Greedy refinement: each pass evaluates every +-1 candidate and applies
    // the single best one; stop when no candidate beats the current score.
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // DCT-domain gradient of the weighted error, used below to prune
            // coefficient insertions that cannot reduce the error.
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            // Intra DC: try +-1 scored on distortion only (no rate term here).
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            // Without strong noise shaping only positions up to one past the
            // last non-zero coefficient are considered.
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // Changing an existing coefficient: rate delta is the
                        // VLC length difference at the same run.
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // Inserting a coefficient (0 -> +-1): splits one run
                        // into two, so the following coefficient's code changes.
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            // Skip candidates whose sign agrees with the
                            // DCT-domain error gradient at this position.
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            // Insertion past the old last coefficient: the
                            // previous "last" code becomes a normal one.
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // Removing a coefficient (+-1 -> 0): merges two runs.
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        // Removing the last coefficient: the previous non-zero
                        // coefficient becomes the new "last" code.
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;   // convert bit delta to the distortion scale

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // Apply the winning candidate and resynchronize last_non_zero,
            // the run-length table, and the error accumulator.
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4469 
/**
 * Permute an 8x8 block of coefficients in place.
 *
 * Only the coefficients covered by scantable[0..last] are moved; each such
 * coefficient at position scantable[i] ends up at permutation[scantable[i]].
 *
 * @param block       the block to permute
 * @param permutation the permutation vector (maps old index -> new index)
 * @param last        last non-zero coefficient in scantable order; used only
 *                    to limit the work, a larger value is harmless
 * @param scantable   scan order used to enumerate the occupied positions;
 *                    the block itself is NOT reordered to scantable order
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* Pull out every covered coefficient and clear its old slot, so that
     * overlapping source/destination positions cannot clobber each other. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        saved[idx] = block[idx];
        block[idx] = 0;
    }

    /* Drop each saved coefficient into its permuted position. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        block[permutation[idx]] = saved[idx];
    }
}
4505 
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    // Default C quantizer: forward DCT, optional denoising, then scalar
    // quantization with dead-zone bias, followed by the IDCT permutation.
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;                       // OR of all quantized magnitudes, for the overflow check
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;   // DC: plain rounded division
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    // First pass, backwards from the end of the scan: zero trailing
    // coefficients and find the last one that survives quantization.
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    // Second pass, forwards up to last_non_zero: quantize each coefficient
    // (single unsigned compare tests |level| against the dead zone).
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
4588 
// Byte offset of a field inside MpegEncContext, for the AVOption tables below.
#define OFFSET(x) offsetof(MpegEncContext, x)
// Option applies to video encoding.
// NOTE(review): expansion is unparenthesized; harmless in the AVOption flags
// field, but parenthesize before reusing it inside a larger expression.
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
#if FF_API_MPEGVIDEO_OPTS
#endif
    { NULL },
};
4603 
/* AVClass exposing h263_options through the AVOption API. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name = av_default_item_name,
    .option = h263_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4610 
4612  .name = "h263",
4613  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4614  .type = AVMEDIA_TYPE_VIDEO,
4615  .id = AV_CODEC_ID_H263,
4616  .priv_data_size = sizeof(MpegEncContext),
4618  .encode2 = ff_mpv_encode_picture,
4619  .close = ff_mpv_encode_end,
4622  .priv_class = &h263_class,
4623 };
4624 
/* Private options of the H.263+ (H.263v2) encoder. */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
#if FF_API_MPEGVIDEO_OPTS
#endif
    { NULL },
};
/* AVClass exposing h263p_options through the AVOption API. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name = av_default_item_name,
    .option = h263p_options,
    .version = LIBAVUTIL_VERSION_INT,
};
4645 
4647  .name = "h263p",
4648  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4649  .type = AVMEDIA_TYPE_VIDEO,
4650  .id = AV_CODEC_ID_H263P,
4651  .priv_data_size = sizeof(MpegEncContext),
4653  .encode2 = ff_mpv_encode_picture,
4654  .close = ff_mpv_encode_end,
4655  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4658  .priv_class = &h263p_class,
4659 };
4660 
4662  .name = "msmpeg4v2",
4663  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4664  .type = AVMEDIA_TYPE_VIDEO,
4665  .id = AV_CODEC_ID_MSMPEG4V2,
4666  .priv_class = &ff_mpv_enc_class,
4667  .priv_data_size = sizeof(MpegEncContext),
4669  .encode2 = ff_mpv_encode_picture,
4670  .close = ff_mpv_encode_end,
4673 };
4674 
4676  .name = "msmpeg4",
4677  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4678  .type = AVMEDIA_TYPE_VIDEO,
4679  .id = AV_CODEC_ID_MSMPEG4V3,
4680  .priv_class = &ff_mpv_enc_class,
4681  .priv_data_size = sizeof(MpegEncContext),
4683  .encode2 = ff_mpv_encode_picture,
4684  .close = ff_mpv_encode_end,
4687 };
4688 
4690  .name = "wmv1",
4691  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4692  .type = AVMEDIA_TYPE_VIDEO,
4693  .id = AV_CODEC_ID_WMV1,
4694  .priv_class = &ff_mpv_enc_class,
4695  .priv_data_size = sizeof(MpegEncContext),
4697  .encode2 = ff_mpv_encode_picture,
4698  .close = ff_mpv_encode_end,
4701 };
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:337
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:926
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:76
FF_MPV_DEPRECATED_A53_CC_OPT
#define FF_MPV_DEPRECATED_A53_CC_OPT
Definition: mpegvideo.h:664
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:133
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:424
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
AVCodec
AVCodec.
Definition: codec.h:202
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:280
stride
int stride
Definition: mace.c:144
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:255
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:61
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:85
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:99
level
uint8_t level
Definition: svq3.c:204
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:394
av_clip
#define av_clip
Definition: common.h:96
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3496
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:388
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:196
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:43
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:123
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1363
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:228
mem_internal.h
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1337
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:670
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:243
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1285
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1201
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:123
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:604
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2446
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:210
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2592
MAX_RUN
#define MAX_RUN
Definition: rl.h:35
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4481
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4133
ff_mjpeg_encode_picture_header
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
Definition: mjpegenc_common.c:220
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:868
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:170
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:162
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2634
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:702
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1620
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:253
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
avcodec_find_encoder
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:916
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:395
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:323
ff_h263p_encoder
const AVCodec ff_h263p_encoder
Definition: mpegvideo_enc.c:4646
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:307
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
pixdesc.h
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:367
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:424
index
fg index
Definition: ffmpeg_filter.c:167
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:113
w
uint8_t w
Definition: llviddspenc.c:38
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:47
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:1038
MpegEncContext::f_count
int f_count
Definition: mpegvideo.h:338
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:371
AVOption
AVOption.
Definition: opt.h:247
encode.h
b
#define b
Definition: input.c:40
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:99
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:115
data
const char data[16]
Definition: mxf.c:143
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:202
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:216
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:261
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:120
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1923
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
ff_h261_encode_init
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:373
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2286
max
#define max(a, b)
Definition: cuda_runtime.h:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:88
mathematics.h
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:247
Picture
Picture.
Definition: mpegpicture.h:45
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:106
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2615
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:141
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:856
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:104
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:990
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1165
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:281
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:513
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:214
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:475
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:539
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:428
thread.h
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:215
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:265
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:581
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:252
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:53
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:36
init
static int init
Definition: av_tx.c:47
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:114
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:47
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:788
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1712
sp5x.h
OFFSET
#define OFFSET(x)
Definition: mpegvideo_enc.c:4589
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:69
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3458
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
FDCTDSPContext
Definition: fdctdsp.h:26
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:875
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:201
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:198
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:848
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3420
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:318
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:112
fail
#define fail()
Definition: checkasm.h:127
h261.h
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1440
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:111
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:401
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
get_intra_count
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1015
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1015
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
perm
perm
Definition: f_perms.c:74
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:441
MpegEncContext::umvplus
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:365
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2688
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:264
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1388
pts
static int64_t pts
Definition: transcode_aac.c:653
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:65
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:53
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:239
ff_sqrt
#define ff_sqrt
Definition: mathops.h:206
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:266
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:105
h263_options
static const AVOption h263_options[]
Definition: mpegvideo_enc.c:4591
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:458
flv.h
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:134
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:291
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:477
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:260
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2745
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:482
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg4videoenc.c:1060
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:335
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1128
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1641
RateControlContext
rate control context.
Definition: ratecontrol.h:63
mpeg12.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:234
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2722
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
ff_h261_get_picture_format
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:41
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:86
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4135
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:679
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:141
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:771
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:46
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
s
#define s(width, name)
Definition: cbs_vp9.c:257
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:111
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:477
encode_frame
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1238
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
ff_mpeg2_dc_scale_table
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:250
g
const char * g
Definition: vf_curves.c:117
sse
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2571
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
HUFFMAN_TABLE_OPTIMAL
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
bits
uint8_t bits
Definition: vp3data.h:141
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
FMT_H261
@ FMT_H261
Definition: mpegutils.h:124
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1425
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:218
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:64
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:104
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:241
MpegEncContext::b_count
int b_count
Definition: mpegvideo.h:339
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:865
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1194
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1377
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:104
PutBitContext
Definition: put_bits.h:49
Picture::encoding_error
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:91
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:723
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2659
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:88
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1179
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:440
h263_class
static const AVClass h263_class
Definition: mpegvideo_enc.c:4604
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:399
sqr
static double sqr(double x)
Definition: af_adynamicequalizer.c:84
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:173
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:52
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
FF_MPV_DEPRECATED_BFRAME_OPTS
#define FF_MPV_DEPRECATED_BFRAME_OPTS
Definition: mpegvideo.h:668
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2702
run
uint8_t run
Definition: svq3.c:203
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:277
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:321
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:227
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:156
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:418
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:433
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:49
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
src
#define src
Definition: vp8dsp.c:255
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2351
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:274
mathops.h
FF_MPV_DEPRECATED_MPEG_QUANT_OPT
#define FF_MPV_DEPRECATED_MPEG_QUANT_OPT
Definition: mpegvideo.h:662
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:277
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3419
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:778
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:965
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:857
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:137
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1229
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:67
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:232
ff_h263_encoder
const AVCodec ff_h263_encoder
Definition: mpegvideo_enc.c:4611
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:126
wmv2.h
AVOnce
#define AVOnce
Definition: thread.h:172
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:267
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1561
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:51
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:256
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:709
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:276
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3796
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:403
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
MAX_LEVEL
#define MAX_LEVEL
Definition: rl.h:36
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:506
get_sae
static int get_sae(uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1001
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1236
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:222
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3426
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:279
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:74
VE
#define VE
Definition: mpegvideo_enc.c:4590
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
AVPacket::size
int size
Definition: packet.h:374
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:355
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:578
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:194
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1046
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:325
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:1947
CONFIG_MSMPEG4_ENCODER
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
ff_msmpeg4v2_encoder
const AVCodec ff_msmpeg4v2_encoder
Definition: mpegvideo_enc.c:4661
ff_msmpeg4v3_encoder
const AVCodec ff_msmpeg4v3_encoder
Definition: mpegvideo_enc.c:4675
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:289
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:114
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:197
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:335
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:322
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:127
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:54
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:88
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:157
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:118
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:114
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
Definition: mpegvideo_enc.c:2061
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:235
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:346
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:254
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:275
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:258
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
MpegEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:118
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:119
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:158
rv10.h
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1283
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:215
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:52
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:190
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:478
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:741
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:134
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2520
src1
#define src1
Definition: h264pred.c:140
ff_msmpeg4_encode_init
void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:136
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:228
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:214
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:482
FMT_H263
@ FMT_H263
Definition: mpegutils.h:125
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:55
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:476
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:79
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3820
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2781
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:34
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1259
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:884
MpegEncContext::obmc
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:357
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:398
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:109
FF_MPV_DEPRECATED_MATRIX_OPT
#define FF_MPV_DEPRECATED_MATRIX_OPT
Definition: mpegvideo.h:666
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:277
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_wmv1_encoder
const AVCodec ff_wmv1_encoder
Definition: mpegvideo_enc.c:4689
get_visual_weight
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2037
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:789
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:435
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
h263p_class
static const AVClass h263p_class
Definition: mpegvideo_enc.c:4639
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1324
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:559
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
AVCodecContext::height
int height
Definition: avcodec.h:556
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:355
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:108
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:117
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
MpegEncContext::h263_slice_structured
int h263_slice_structured
Definition: mpegvideo.h:367
avcodec.h
msmpeg4.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:756
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:282
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:125
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1280
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:116
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_mjpeg_encode_close
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:323
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:146
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:584
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:238
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:247
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:200
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:730
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1459
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:85
Picture::shared
int shared
Definition: mpegpicture.h:89
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:69
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:369
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:340
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:103
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:232
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1158
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:46
MpegEncContext::gop_picture_number
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:444
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1406
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:716
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1583
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2267
temp
else temp
Definition: vf_mcdeint.c:248
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:855
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:77
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:109
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:859
shift
static int shift(int a, int b)
Definition: sonic.c:83
desc
const char * desc
Definition: libsvtav1.c:79
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:256
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:655
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:575
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:106
packet_internal.h
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
skip_check
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
Definition: mpegvideo_enc.c:1197
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:569
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1208
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4154
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:582
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:251
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:983
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:858
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2003
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:877
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:66
encode_picture
static int encode_picture(MpegEncContext *s, int picture_number)
Definition: mpegvideo_enc.c:3510
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:78
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:583
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:341
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:411
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2481
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:66
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:144
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2453
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:71
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:109
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4506
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:153
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:448
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:609
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:71
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:34
mb_info
Definition: cinepakenc.c:88
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:186
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:753
MpegEncContext::alt_inter_vlc
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:368
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:336
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:448
pixblockdsp.h
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:54
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:110
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:976
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:231
h263p_options
static const AVOption h263p_options[]
Definition: mpegvideo_enc.c:4625
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2682
intmath.h