FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
42 #include "avcodec.h"
43 #include "dct.h"
44 #include "idctdsp.h"
45 #include "mpeg12.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
48 #include "h261.h"
49 #include "h263.h"
50 #include "h263data.h"
51 #include "mjpegenc_common.h"
52 #include "mathops.h"
53 #include "mpegutils.h"
54 #include "mjpegenc.h"
55 #include "msmpeg4.h"
56 #include "pixblockdsp.h"
57 #include "qpeldsp.h"
58 #include "faandct.h"
59 #include "thread.h"
60 #include "aandcttab.h"
61 #include "flv.h"
62 #include "mpeg4video.h"
63 #include "internal.h"
64 #include "bytestream.h"
65 #include "wmv2.h"
66 #include "rv10.h"
67 #include <limits.h>
68 #include "sp5x.h"
69 
70 #define QUANT_BIAS_SHIFT 8
71 
72 #define QMAT_SHIFT_MMX 16
73 #define QMAT_SHIFT 21
74 
76 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
77 static int sse_mb(MpegEncContext *s);
78 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
79 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
80 
83 
86  { NULL },
87 };
88 
/**
 * Build the per-qscale quantization multiplier tables used by the
 * encoder's dct_quantize routines.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale] with 32-bit
 * reciprocal multipliers derived from quant_matrix (permuted through
 * s->idsp.idct_permutation), choosing one of three scalings depending on
 * which FDCT implementation is active (islow/faan vs ifast vs default).
 * On the default path it additionally fills the 16-bit tables
 * qmat16[qscale][0] (multiplier, clamped away from 0 and 128*256) and
 * qmat16[qscale][1] (rounding term derived from 'bias').
 * The trailing loop measures how much headroom the products have and
 * warns (via the truncated av_log below) when overflow is possible.
 *
 * NOTE(review): this extraction dropped the original line between
 * "int qscale2;" and the "else" below (presumably the q_scale_type
 * dependent computation of qscale2) and the av_log() call line inside
 * the final "if (shift)" block — verify against the upstream file
 * before editing.
 */
89 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
90  uint16_t (*qmat16)[2][64],
91  const uint16_t *quant_matrix,
92  int bias, int qmin, int qmax, int intra)
93 {
94  FDCTDSPContext *fdsp = &s->fdsp;
95  int qscale;
96  int shift = 0;
97 
98  for (qscale = qmin; qscale <= qmax; qscale++) {
99  int i;
100  int qscale2;
101 
/* NOTE(review): a source line is missing here (the 'if' half of this
 * if/else computing qscale2) — TODO confirm against upstream. */
103  else qscale2 = qscale << 1;
104 
105  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
106 #if CONFIG_FAANDCT
107  fdsp->fdct == ff_faandct ||
108 #endif /* CONFIG_FAANDCT */
109  fdsp->fdct == ff_jpeg_fdct_islow_10) {
110  for (i = 0; i < 64; i++) {
111  const int j = s->idsp.idct_permutation[i];
112  int64_t den = (int64_t) qscale2 * quant_matrix[j];
113  /* 16 <= qscale * quant_matrix[i] <= 7905
114  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
115  * 19952 <= x <= 249205026
116  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
117  * 3444240 >= (1 << 36) / (x) >= 275 */
118 
119  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
120  }
121  } else if (fdsp->fdct == ff_fdct_ifast) {
/* ifast FDCT leaves the AAN scale factors in its output, so they are
 * folded into the divisor here (hence the extra +14 shift below). */
122  for (i = 0; i < 64; i++) {
123  const int j = s->idsp.idct_permutation[i];
124  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
125  /* 16 <= qscale * quant_matrix[i] <= 7905
126  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
127  * 19952 <= x <= 249205026
128  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
129  * 3444240 >= (1 << 36) / (x) >= 275 */
130 
131  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
132  }
133  } else {
/* Default FDCT: also build the 16-bit multiplier/bias tables used by
 * the SIMD quantizers. */
134  for (i = 0; i < 64; i++) {
135  const int j = s->idsp.idct_permutation[i];
136  int64_t den = (int64_t) qscale2 * quant_matrix[j];
137  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138  * Assume x = qscale * quant_matrix[i]
139  * So 16 <= x <= 7905
140  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
141  * so 32768 >= (1 << 19) / (x) >= 67 */
142  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
144  // (qscale * quant_matrix[i]);
145  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
146 
/* Keep the 16-bit multiplier strictly inside (0, 128*256). */
147  if (qmat16[qscale][0][i] == 0 ||
148  qmat16[qscale][0][i] == 128 * 256)
149  qmat16[qscale][0][i] = 128 * 256 - 1;
150  qmat16[qscale][1][i] =
151  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
152  qmat16[qscale][0][i]);
153  }
154  }
155 
/* Find the largest shift for which max * qmat still fits in an int;
 * 'intra' skips the DC coefficient for intra matrices. */
156  for (i = intra; i < 64; i++) {
157  int64_t max = 8191;
158  if (fdsp->fdct == ff_fdct_ifast) {
159  max = (8191LL * ff_aanscales[i]) >> 14;
160  }
161  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
162  shift++;
163  }
164  }
165  }
166  if (shift) {
/* NOTE(review): the av_log(...) call line was dropped by the
 * extraction here — these are its trailing arguments. */
168  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
169  QMAT_SHIFT - shift);
170  }
171 }
172 
/**
 * Derive s->qscale (and the cached s->lambda2) from the current
 * rate-control lambda s->lambda.
 *
 * The first branch — a nearest-match search over the MPEG-2 non-linear
 * qscale table — is intentionally disabled by the "&& 0" in its
 * condition, so the linear mapping in the else branch is always taken;
 * it converts lambda to qscale and clips it to [qmin, qmax] (qmax being
 * relaxed to 31 while s->vbv_ignore_qmax is set).
 *
 * NOTE(review): the extraction dropped one line of the 'if' condition
 * inside the loop (presumably the qmax comparison) and the final shift
 * amount line of the lambda2 computation — verify against upstream
 * before editing.
 */
173 static inline void update_qscale(MpegEncContext *s)
174 {
175  if (s->q_scale_type == 1 && 0) {
176  int i;
177  int bestdiff=INT_MAX;
178  int best = 1;
179 
/* Pick the table entry whose implied lambda is closest to s->lambda. */
180  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
181  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
182  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
/* NOTE(review): missing condition line here (likely the qmax bound). */
184  continue;
185  if (diff < bestdiff) {
186  bestdiff = diff;
187  best = i;
188  }
189  }
190  s->qscale = best;
191  } else {
/* Linear lambda -> qscale mapping with rounding, then clip. */
192  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
193  (FF_LAMBDA_SHIFT + 7);
194  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
195  }
196 
/* Cache lambda^2 (rounded); the shift-amount line was dropped by the
 * extraction — TODO confirm against upstream. */
197  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
199 }
200 
201 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
202 {
203  int i;
204 
205  if (matrix) {
206  put_bits(pb, 1, 1);
207  for (i = 0; i < 64; i++) {
208  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
209  }
210  } else
211  put_bits(pb, 1, 0);
212 }
213 
214 /**
215  * init s->current_picture.qscale_table from s->lambda_table
216  */
218 {
219  int8_t * const qscale_table = s->current_picture.qscale_table;
220  int i;
221 
222  for (i = 0; i < s->mb_num; i++) {
223  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
224  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
225  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
226  s->avctx->qmax);
227  }
228 }
229 
232 {
233 #define COPY(a) dst->a= src->a
234  COPY(pict_type);
236  COPY(f_code);
237  COPY(b_code);
238  COPY(qscale);
239  COPY(lambda);
240  COPY(lambda2);
243  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
244  COPY(progressive_frame); // FIXME don't set in encode_header
245  COPY(partitioned_frame); // FIXME don't set in encode_header
246 #undef COPY
247 }
248 
249 /**
250  * Set the given MpegEncContext to defaults for encoding.
251  * the changed fields will not depend upon the prior state of the MpegEncContext.
252  */
254 {
255  int i;
257 
258  for (i = -16; i < 16; i++) {
259  default_fcode_tab[i + MAX_MV] = 1;
260  }
263 
264  s->input_picture_number = 0;
265  s->picture_in_gop_number = 0;
266 }
267 
269  if (ARCH_X86)
271 
272  if (CONFIG_H263_ENCODER)
274  if (!s->dct_quantize)
276  if (!s->denoise_dct)
279  if (s->avctx->trellis)
281 
282  return 0;
283 }
284 
285 /* init video encoder */
287 {
288  MpegEncContext *s = avctx->priv_data;
289  AVCPBProperties *cpb_props;
290  int i, ret, format_supported;
291 
293 
294  switch (avctx->codec_id) {
296  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298  av_log(avctx, AV_LOG_ERROR,
299  "only YUV420 and YUV422 are supported\n");
300  return -1;
301  }
302  break;
303  case AV_CODEC_ID_MJPEG:
304  case AV_CODEC_ID_AMV:
305  format_supported = 0;
306  /* JPEG color space */
307  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
308  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
309  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
310  (avctx->color_range == AVCOL_RANGE_JPEG &&
311  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
312  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
313  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
314  format_supported = 1;
315  /* MPEG color space */
316  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
317  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
318  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
319  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
320  format_supported = 1;
321 
322  if (!format_supported) {
323  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
324  return -1;
325  }
326  break;
327  default:
328  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
329  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
330  return -1;
331  }
332  }
333 
334  switch (avctx->pix_fmt) {
335  case AV_PIX_FMT_YUVJ444P:
336  case AV_PIX_FMT_YUV444P:
338  break;
339  case AV_PIX_FMT_YUVJ422P:
340  case AV_PIX_FMT_YUV422P:
342  break;
343  case AV_PIX_FMT_YUVJ420P:
344  case AV_PIX_FMT_YUV420P:
345  default:
347  break;
348  }
349 
350  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
351 
352 #if FF_API_PRIVATE_OPT
354  if (avctx->rtp_payload_size)
356  if (avctx->me_penalty_compensation)
358  if (avctx->pre_me)
359  s->me_pre = avctx->pre_me;
361 #endif
362 
363  s->bit_rate = avctx->bit_rate;
364  s->width = avctx->width;
365  s->height = avctx->height;
366  if (avctx->gop_size > 600 &&
368  av_log(avctx, AV_LOG_WARNING,
369  "keyframe interval too large!, reducing it from %d to %d\n",
370  avctx->gop_size, 600);
371  avctx->gop_size = 600;
372  }
373  s->gop_size = avctx->gop_size;
374  s->avctx = avctx;
375  if (avctx->max_b_frames > MAX_B_FRAMES) {
376  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
377  "is %d.\n", MAX_B_FRAMES);
378  avctx->max_b_frames = MAX_B_FRAMES;
379  }
380  s->max_b_frames = avctx->max_b_frames;
381  s->codec_id = avctx->codec->id;
383  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
384  s->rtp_mode = !!s->rtp_payload_size;
386 
387  // workaround some differences between how applications specify dc precision
388  if (s->intra_dc_precision < 0) {
389  s->intra_dc_precision += 8;
390  } else if (s->intra_dc_precision >= 8)
391  s->intra_dc_precision -= 8;
392 
393  if (s->intra_dc_precision < 0) {
394  av_log(avctx, AV_LOG_ERROR,
395  "intra dc precision must be positive, note some applications use"
396  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
397  return AVERROR(EINVAL);
398  }
399 
400  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
401  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
402  return AVERROR(EINVAL);
403  }
405 
406  if (s->gop_size <= 1) {
407  s->intra_only = 1;
408  s->gop_size = 12;
409  } else {
410  s->intra_only = 0;
411  }
412 
413 #if FF_API_MOTION_EST
415  s->me_method = avctx->me_method;
417 #endif
418 
419  /* Fixed QSCALE */
420  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
421 
422 #if FF_API_MPV_OPT
424  if (avctx->border_masking != 0.0)
425  s->border_masking = avctx->border_masking;
427 #endif
428 
429  s->adaptive_quant = (s->avctx->lumi_masking ||
430  s->avctx->dark_masking ||
433  s->avctx->p_masking ||
434  s->border_masking ||
435  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
436  !s->fixed_qscale;
437 
439 
440  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
441  switch(avctx->codec_id) {
444  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
445  break;
446  case AV_CODEC_ID_MPEG4:
450  if (avctx->rc_max_rate >= 15000000) {
451  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
452  } else if(avctx->rc_max_rate >= 2000000) {
453  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
454  } else if(avctx->rc_max_rate >= 384000) {
455  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
456  } else
457  avctx->rc_buffer_size = 40;
458  avctx->rc_buffer_size *= 16384;
459  break;
460  }
461  if (avctx->rc_buffer_size) {
462  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
463  }
464  }
465 
466  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
467  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
468  return -1;
469  }
470 
471  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
472  av_log(avctx, AV_LOG_INFO,
473  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
474  }
475 
476  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
477  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
478  return -1;
479  }
480 
481  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
482  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
483  return -1;
484  }
485 
486  if (avctx->rc_max_rate &&
487  avctx->rc_max_rate == avctx->bit_rate &&
488  avctx->rc_max_rate != avctx->rc_min_rate) {
489  av_log(avctx, AV_LOG_INFO,
490  "impossible bitrate constraints, this will fail\n");
491  }
492 
493  if (avctx->rc_buffer_size &&
494  avctx->bit_rate * (int64_t)avctx->time_base.num >
495  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
496  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
497  return -1;
498  }
499 
500  if (!s->fixed_qscale &&
501  avctx->bit_rate * av_q2d(avctx->time_base) >
502  avctx->bit_rate_tolerance) {
503  av_log(avctx, AV_LOG_WARNING,
504  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
505  avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
506  }
507 
508  if (s->avctx->rc_max_rate &&
509  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
512  90000LL * (avctx->rc_buffer_size - 1) >
513  s->avctx->rc_max_rate * 0xFFFFLL) {
514  av_log(avctx, AV_LOG_INFO,
515  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
516  "specified vbv buffer is too large for the given bitrate!\n");
517  }
518 
519  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
521  s->codec_id != AV_CODEC_ID_FLV1) {
522  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
523  return -1;
524  }
525 
526  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
527  av_log(avctx, AV_LOG_ERROR,
528  "OBMC is only supported with simple mb decision\n");
529  return -1;
530  }
531 
532  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
533  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
534  return -1;
535  }
536 
537  if (s->max_b_frames &&
538  s->codec_id != AV_CODEC_ID_MPEG4 &&
541  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
542  return -1;
543  }
544  if (s->max_b_frames < 0) {
545  av_log(avctx, AV_LOG_ERROR,
546  "max b frames must be 0 or positive for mpegvideo based encoders\n");
547  return -1;
548  }
549 
550  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
551  s->codec_id == AV_CODEC_ID_H263 ||
552  s->codec_id == AV_CODEC_ID_H263P) &&
553  (avctx->sample_aspect_ratio.num > 255 ||
554  avctx->sample_aspect_ratio.den > 255)) {
555  av_log(avctx, AV_LOG_WARNING,
556  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
559  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
560  }
561 
562  if ((s->codec_id == AV_CODEC_ID_H263 ||
563  s->codec_id == AV_CODEC_ID_H263P) &&
564  (avctx->width > 2048 ||
565  avctx->height > 1152 )) {
566  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
567  return -1;
568  }
569  if ((s->codec_id == AV_CODEC_ID_H263 ||
570  s->codec_id == AV_CODEC_ID_H263P) &&
571  ((avctx->width &3) ||
572  (avctx->height&3) )) {
573  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
574  return -1;
575  }
576 
577  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
578  (avctx->width > 4095 ||
579  avctx->height > 4095 )) {
580  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
581  return -1;
582  }
583 
584  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
585  (avctx->width > 16383 ||
586  avctx->height > 16383 )) {
587  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
588  return -1;
589  }
590 
591  if (s->codec_id == AV_CODEC_ID_RV10 &&
592  (avctx->width &15 ||
593  avctx->height&15 )) {
594  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
595  return AVERROR(EINVAL);
596  }
597 
598  if (s->codec_id == AV_CODEC_ID_RV20 &&
599  (avctx->width &3 ||
600  avctx->height&3 )) {
601  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
602  return AVERROR(EINVAL);
603  }
604 
605  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
606  s->codec_id == AV_CODEC_ID_WMV2) &&
607  avctx->width & 1) {
608  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
609  return -1;
610  }
611 
614  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
615  return -1;
616  }
617 
618 #if FF_API_PRIVATE_OPT
620  if (avctx->mpeg_quant)
621  s->mpeg_quant = avctx->mpeg_quant;
623 #endif
624 
625  // FIXME mpeg2 uses that too
626  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
627  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
628  av_log(avctx, AV_LOG_ERROR,
629  "mpeg2 style quantization not supported by codec\n");
630  return -1;
631  }
632 
633  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
634  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
635  return -1;
636  }
637 
638  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
640  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
641  return -1;
642  }
643 
644 #if FF_API_PRIVATE_OPT
646  if (avctx->scenechange_threshold)
649 #endif
650 
651  if (s->scenechange_threshold < 1000000000 &&
653  av_log(avctx, AV_LOG_ERROR,
654  "closed gop with scene change detection are not supported yet, "
655  "set threshold to 1000000000\n");
656  return -1;
657  }
658 
659  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
660  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
661  av_log(avctx, AV_LOG_ERROR,
662  "low delay forcing is only available for mpeg2\n");
663  return -1;
664  }
665  if (s->max_b_frames != 0) {
666  av_log(avctx, AV_LOG_ERROR,
667  "B-frames cannot be used with low delay\n");
668  return -1;
669  }
670  }
671 
672  if (s->q_scale_type == 1) {
673  if (avctx->qmax > 28) {
674  av_log(avctx, AV_LOG_ERROR,
675  "non linear quant only supports qmax <= 28 currently\n");
676  return -1;
677  }
678  }
679 
680  if (avctx->slices > 1 &&
681  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
682  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
683  return AVERROR(EINVAL);
684  }
685 
686  if (s->avctx->thread_count > 1 &&
687  s->codec_id != AV_CODEC_ID_MPEG4 &&
690  s->codec_id != AV_CODEC_ID_MJPEG &&
691  (s->codec_id != AV_CODEC_ID_H263P)) {
692  av_log(avctx, AV_LOG_ERROR,
693  "multi threaded encoding not supported by codec\n");
694  return -1;
695  }
696 
697  if (s->avctx->thread_count < 1) {
698  av_log(avctx, AV_LOG_ERROR,
699  "automatic thread number detection not supported by codec, "
700  "patch welcome\n");
701  return -1;
702  }
703 
704  if (!avctx->time_base.den || !avctx->time_base.num) {
705  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
706  return -1;
707  }
708 
709 #if FF_API_PRIVATE_OPT
711  if (avctx->b_frame_strategy)
713  if (avctx->b_sensitivity != 40)
714  s->b_sensitivity = avctx->b_sensitivity;
716 #endif
717 
718  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
719  av_log(avctx, AV_LOG_INFO,
720  "notice: b_frame_strategy only affects the first pass\n");
721  s->b_frame_strategy = 0;
722  }
723 
724  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
725  if (i > 1) {
726  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
727  avctx->time_base.den /= i;
728  avctx->time_base.num /= i;
729  //return -1;
730  }
731 
733  // (a + x * 3 / 8) / x
734  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
735  s->inter_quant_bias = 0;
736  } else {
737  s->intra_quant_bias = 0;
738  // (a - x / 4) / x
739  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
740  }
741 
742  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
743  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
744  return AVERROR(EINVAL);
745  }
746 
747 #if FF_API_QUANT_BIAS
754 #endif
755 
756  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
757 
758  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
759  s->avctx->time_base.den > (1 << 16) - 1) {
760  av_log(avctx, AV_LOG_ERROR,
761  "timebase %d/%d not supported by MPEG 4 standard, "
762  "the maximum admitted value for the timebase denominator "
763  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
764  (1 << 16) - 1);
765  return -1;
766  }
767  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
768 
769  switch (avctx->codec->id) {
771  s->out_format = FMT_MPEG1;
773  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
774  break;
776  s->out_format = FMT_MPEG1;
778  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
779  s->rtp_mode = 1;
780  break;
781  case AV_CODEC_ID_MJPEG:
782  case AV_CODEC_ID_AMV:
783  s->out_format = FMT_MJPEG;
784  s->intra_only = 1; /* force intra only for jpeg */
785  if (!CONFIG_MJPEG_ENCODER ||
786  ff_mjpeg_encode_init(s) < 0)
787  return -1;
788  avctx->delay = 0;
789  s->low_delay = 1;
790  break;
791  case AV_CODEC_ID_H261:
792  if (!CONFIG_H261_ENCODER)
793  return -1;
794  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795  av_log(avctx, AV_LOG_ERROR,
796  "The specified picture size of %dx%d is not valid for the "
797  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
798  s->width, s->height);
799  return -1;
800  }
801  s->out_format = FMT_H261;
802  avctx->delay = 0;
803  s->low_delay = 1;
804  s->rtp_mode = 0; /* Sliced encoding not supported */
805  break;
806  case AV_CODEC_ID_H263:
807  if (!CONFIG_H263_ENCODER)
808  return -1;
810  s->width, s->height) == 8) {
811  av_log(avctx, AV_LOG_ERROR,
812  "The specified picture size of %dx%d is not valid for "
813  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
814  "352x288, 704x576, and 1408x1152. "
815  "Try H.263+.\n", s->width, s->height);
816  return -1;
817  }
818  s->out_format = FMT_H263;
819  avctx->delay = 0;
820  s->low_delay = 1;
821  break;
822  case AV_CODEC_ID_H263P:
823  s->out_format = FMT_H263;
824  s->h263_plus = 1;
825  /* Fx */
826  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
827  s->modified_quant = s->h263_aic;
828  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
829  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
830 
831  /* /Fx */
832  /* These are just to be sure */
833  avctx->delay = 0;
834  s->low_delay = 1;
835  break;
836  case AV_CODEC_ID_FLV1:
837  s->out_format = FMT_H263;
838  s->h263_flv = 2; /* format = 1; 11-bit codes */
839  s->unrestricted_mv = 1;
840  s->rtp_mode = 0; /* don't allow GOB */
841  avctx->delay = 0;
842  s->low_delay = 1;
843  break;
844  case AV_CODEC_ID_RV10:
845  s->out_format = FMT_H263;
846  avctx->delay = 0;
847  s->low_delay = 1;
848  break;
849  case AV_CODEC_ID_RV20:
850  s->out_format = FMT_H263;
851  avctx->delay = 0;
852  s->low_delay = 1;
853  s->modified_quant = 1;
854  s->h263_aic = 1;
855  s->h263_plus = 1;
856  s->loop_filter = 1;
857  s->unrestricted_mv = 0;
858  break;
859  case AV_CODEC_ID_MPEG4:
860  s->out_format = FMT_H263;
861  s->h263_pred = 1;
862  s->unrestricted_mv = 1;
863  s->low_delay = s->max_b_frames ? 0 : 1;
864  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
865  break;
867  s->out_format = FMT_H263;
868  s->h263_pred = 1;
869  s->unrestricted_mv = 1;
870  s->msmpeg4_version = 2;
871  avctx->delay = 0;
872  s->low_delay = 1;
873  break;
875  s->out_format = FMT_H263;
876  s->h263_pred = 1;
877  s->unrestricted_mv = 1;
878  s->msmpeg4_version = 3;
879  s->flipflop_rounding = 1;
880  avctx->delay = 0;
881  s->low_delay = 1;
882  break;
883  case AV_CODEC_ID_WMV1:
884  s->out_format = FMT_H263;
885  s->h263_pred = 1;
886  s->unrestricted_mv = 1;
887  s->msmpeg4_version = 4;
888  s->flipflop_rounding = 1;
889  avctx->delay = 0;
890  s->low_delay = 1;
891  break;
892  case AV_CODEC_ID_WMV2:
893  s->out_format = FMT_H263;
894  s->h263_pred = 1;
895  s->unrestricted_mv = 1;
896  s->msmpeg4_version = 5;
897  s->flipflop_rounding = 1;
898  avctx->delay = 0;
899  s->low_delay = 1;
900  break;
901  default:
902  return -1;
903  }
904 
905 #if FF_API_PRIVATE_OPT
907  if (avctx->noise_reduction)
908  s->noise_reduction = avctx->noise_reduction;
910 #endif
911 
912  avctx->has_b_frames = !s->low_delay;
913 
914  s->encoding = 1;
915 
916  s->progressive_frame =
919  s->alternate_scan);
920 
921  /* init */
922  ff_mpv_idct_init(s);
923  if (ff_mpv_common_init(s) < 0)
924  return -1;
925 
926  ff_fdctdsp_init(&s->fdsp, avctx);
927  ff_me_cmp_init(&s->mecc, avctx);
929  ff_pixblockdsp_init(&s->pdsp, avctx);
930  ff_qpeldsp_init(&s->qdsp);
931 
932  if (s->msmpeg4_version) {
934  2 * 2 * (MAX_LEVEL + 1) *
935  (MAX_RUN + 1) * 2 * sizeof(int), fail);
936  }
937  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
938 
939  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
940  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
941  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
946  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
948  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
949 
950 
951  if (s->noise_reduction) {
953  2 * 64 * sizeof(uint16_t), fail);
954  }
955 
957 
958  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
960 
961  if (s->slice_context_count > 1) {
962  s->rtp_mode = 1;
963 
964  if (avctx->codec_id == AV_CODEC_ID_H263P)
965  s->h263_slice_structured = 1;
966  }
967 
968  s->quant_precision = 5;
969 
970 #if FF_API_PRIVATE_OPT
972  if (avctx->frame_skip_threshold)
974  if (avctx->frame_skip_factor)
976  if (avctx->frame_skip_exp)
977  s->frame_skip_exp = avctx->frame_skip_exp;
978  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
979  s->frame_skip_cmp = avctx->frame_skip_cmp;
981 #endif
982 
985 
986  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
988  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
991  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
992  return ret;
993  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
994  && s->out_format == FMT_MPEG1)
996 
997  /* init q matrix */
998  for (i = 0; i < 64; i++) {
999  int j = s->idsp.idct_permutation[i];
1000  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1001  s->mpeg_quant) {
1004  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1005  s->intra_matrix[j] =
1007  } else {
1008  /* MPEG-1/2 */
1009  s->chroma_intra_matrix[j] =
1012  }
1013  if (s->avctx->intra_matrix)
1014  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1015  if (s->avctx->inter_matrix)
1016  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1017  }
1018 
1019  /* precompute matrix */
1020  /* for mjpeg, we do include qscale in the matrix */
1021  if (s->out_format != FMT_MJPEG) {
1023  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1024  31, 1);
1026  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1027  31, 0);
1028  }
1029 
1030  if (ff_rate_control_init(s) < 0)
1031  return -1;
1032 
1033 #if FF_API_ERROR_RATE
1035  if (avctx->error_rate)
1036  s->error_rate = avctx->error_rate;
1038 #endif
1039 
1040 #if FF_API_NORMALIZE_AQP
1042  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1043  s->mpv_flags |= FF_MPV_FLAG_NAQ;
1045 #endif
1046 
1047 #if FF_API_MV0
1049  if (avctx->flags & CODEC_FLAG_MV0)
1050  s->mpv_flags |= FF_MPV_FLAG_MV0;
1052 #endif
1053 
1054 #if FF_API_MPV_OPT
1056  if (avctx->rc_qsquish != 0.0)
1057  s->rc_qsquish = avctx->rc_qsquish;
1058  if (avctx->rc_qmod_amp != 0.0)
1059  s->rc_qmod_amp = avctx->rc_qmod_amp;
1060  if (avctx->rc_qmod_freq)
1061  s->rc_qmod_freq = avctx->rc_qmod_freq;
1062  if (avctx->rc_buffer_aggressivity != 1.0)
1064  if (avctx->rc_initial_cplx != 0.0)
1065  s->rc_initial_cplx = avctx->rc_initial_cplx;
1066  if (avctx->lmin)
1067  s->lmin = avctx->lmin;
1068  if (avctx->lmax)
1069  s->lmax = avctx->lmax;
1070 
1071  if (avctx->rc_eq) {
1072  av_freep(&s->rc_eq);
1073  s->rc_eq = av_strdup(avctx->rc_eq);
1074  if (!s->rc_eq)
1075  return AVERROR(ENOMEM);
1076  }
1078 #endif
1079 
1080 #if FF_API_PRIVATE_OPT
1082  if (avctx->brd_scale)
1083  s->brd_scale = avctx->brd_scale;
1084 
1085  if (avctx->prediction_method)
1086  s->pred = avctx->prediction_method + 1;
1088 #endif
1089 
1090  if (s->b_frame_strategy == 2) {
1091  for (i = 0; i < s->max_b_frames + 2; i++) {
1092  s->tmp_frames[i] = av_frame_alloc();
1093  if (!s->tmp_frames[i])
1094  return AVERROR(ENOMEM);
1095 
1097  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1098  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1099 
1100  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1101  if (ret < 0)
1102  return ret;
1103  }
1104  }
1105 
1106  cpb_props = ff_add_cpb_side_data(avctx);
1107  if (!cpb_props)
1108  return AVERROR(ENOMEM);
1109  cpb_props->max_bitrate = avctx->rc_max_rate;
1110  cpb_props->min_bitrate = avctx->rc_min_rate;
1111  cpb_props->avg_bitrate = avctx->bit_rate;
1112  cpb_props->buffer_size = avctx->rc_buffer_size;
1113 
1114  return 0;
1115 fail:
1116  ff_mpv_encode_end(avctx);
1117  return AVERROR_UNKNOWN;
1118 }
1119 
1121 {
1122  MpegEncContext *s = avctx->priv_data;
1123  int i;
1124 
1126 
1127  ff_mpv_common_end(s);
1128  if (CONFIG_MJPEG_ENCODER &&
1129  s->out_format == FMT_MJPEG)
1131 
1132  av_freep(&avctx->extradata);
1133 
1134  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1135  av_frame_free(&s->tmp_frames[i]);
1136 
1139 
1140  av_freep(&s->avctx->stats_out);
1141  av_freep(&s->ac_stats);
1142 
1147  av_freep(&s->q_intra_matrix);
1148  av_freep(&s->q_inter_matrix);
1151  av_freep(&s->input_picture);
1153  av_freep(&s->dct_offset);
1154 
1155  return 0;
1156 }
1157 
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant.
 *
 * Accumulates |src[x + y*stride] - ref| over the 16x16 block starting
 * at src.
 *
 * @param src    top-left of the 16x16 block
 * @param ref    reference value each pixel is compared against
 * @param stride distance in bytes between successive rows of src
 * @return the sum of absolute differences
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int acc = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            acc += diff < 0 ? -diff : diff;
        }
    }

    return acc;
}
1171 
/* get_intra_count() — the first signature line (static int
 * get_intra_count(MpegEncContext *s, uint8_t *src, ...)) was lost in the
 * extraction. Counts the 16x16 blocks of the frame for which "intra-ness"
 * (SAE around the block mean, plus a 500 bias) beats the inter SAD against
 * the reference frame; used by b_frame_strategy == 1 as a scene-change /
 * intra-cost estimate. Width/height are rounded down to multiples of 16. */
1173  uint8_t *ref, int stride)
1174 {
1175  int x, y, w, h;
1176  int acc = 0;
1177 
1178  w = s->width & ~15;
1179  h = s->height & ~15;
1180 
1181  for (y = 0; y < h; y += 16) {
1182  for (x = 0; x < w; x += 16) {
1183  int offset = x + y * stride;
1184  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1185  stride, 16);
1186  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1187  int sae = get_sae(src + offset, mean, stride);
1188 
/* Count this macroblock as "intra" when flatness + bias beats inter SAD. */
1189  acc += sae + 500 < sad;
1190  }
1191  }
1192  return acc;
1193 }
1194 
/* Thin wrapper around ff_alloc_picture(): allocates (or, when 'shared' is
 * set, only wraps) the buffers of 'pic' with the encoder's geometry and
 * records the resulting luma/chroma linesizes in the context.
 * NOTE(review): one argument line of the call was dropped by the extraction. */
1195 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1196 {
1197  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1199  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1200  &s->linesize, &s->uvlinesize);
1201 }
1202 
/* Queues one user-supplied frame (or a flush request when pic_arg == NULL)
 * into s->input_picture[]. Validates or guesses the pts, decides whether the
 * frame can be referenced directly ("direct" path: matching linesizes,
 * 16-aligned dimensions, STRIDE_ALIGN-aligned data) or must be copied with
 * edge padding. On flush, compacts the queue so input_picture[0] holds the
 * first pending picture. Returns 0 on success or a negative AVERROR.
 * NOTE(review): a few call lines were dropped by the HTML extraction; they
 * are marked below. */
1203 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1204 {
1205  Picture *pic = NULL;
1206  int64_t pts;
1207  int i, display_picture_number = 0, ret;
1208  int encoding_delay = s->max_b_frames ? s->max_b_frames
1209  : (s->low_delay ? 0 : 1);
1210  int flush_offset = 1;
1211  int direct = 1;
1212 
1213  if (pic_arg) {
1214  pts = pic_arg->pts;
1215  display_picture_number = s->input_picture_number++;
1216 
1217  if (pts != AV_NOPTS_VALUE) {
1218  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1219  int64_t last = s->user_specified_pts;
1220 
/* Reject non-monotonic timestamps. NOTE(review): the av_log() call line
 * that emits this message was dropped by the extraction. */
1221  if (pts <= last) {
1223  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1224  pts, last);
1225  return AVERROR(EINVAL);
1226  }
1227 
1228  if (!s->low_delay && display_picture_number == 1)
1229  s->dts_delta = pts - last;
1230  }
1231  s->user_specified_pts = pts;
1232  } else {
1233  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1234  s->user_specified_pts =
1235  pts = s->user_specified_pts + 1;
1236  av_log(s->avctx, AV_LOG_INFO,
1237  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1238  pts);
1239  } else {
1240  pts = display_picture_number;
1241  }
1242  }
1243 
/* "direct" use of the caller's buffer is only possible when geometry and
 * alignment exactly match the encoder's internal layout. */
1244  if (!pic_arg->buf[0] ||
1245  pic_arg->linesize[0] != s->linesize ||
1246  pic_arg->linesize[1] != s->uvlinesize ||
1247  pic_arg->linesize[2] != s->uvlinesize)
1248  direct = 0;
1249  if ((s->width & 15) || (s->height & 15))
1250  direct = 0;
1251  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1252  direct = 0;
1253  if (s->linesize & (STRIDE_ALIGN-1))
1254  direct = 0;
1255 
1256  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1257  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1258 
1259  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1260  if (i < 0)
1261  return i;
1262 
1263  pic = &s->picture[i];
1264  pic->reference = 3;
1265 
1266  if (direct) {
1267  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1268  return ret;
1269  }
1270  ret = alloc_picture(s, pic, direct);
1271  if (ret < 0)
1272  return ret;
1273 
1274  if (!direct) {
1275  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1276  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1277  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1278  // empty
1279  } else {
1280  int h_chroma_shift, v_chroma_shift;
/* NOTE(review): the av_pix_fmt_get_chroma_sub_sample() call line that
 * fills these shifts was dropped by the extraction. */
1282  &h_chroma_shift,
1283  &v_chroma_shift);
1284 
/* Plane-by-plane copy into the internal buffer, then bottom-edge
 * padding so motion estimation can read past the picture. */
1285  for (i = 0; i < 3; i++) {
1286  int src_stride = pic_arg->linesize[i];
1287  int dst_stride = i ? s->uvlinesize : s->linesize;
1288  int h_shift = i ? h_chroma_shift : 0;
1289  int v_shift = i ? v_chroma_shift : 0;
1290  int w = s->width >> h_shift;
1291  int h = s->height >> v_shift;
1292  uint8_t *src = pic_arg->data[i];
1293  uint8_t *dst = pic->f->data[i];
1294  int vpad = 16;
1295 
1296  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1297  && !s->progressive_sequence
1298  && FFALIGN(s->height, 32) - s->height > 16)
1299  vpad = 32;
1300 
1301  if (!s->avctx->rc_buffer_size)
1302  dst += INPLACE_OFFSET;
1303 
1304  if (src_stride == dst_stride)
1305  memcpy(dst, src, src_stride * h);
1306  else {
1307  int h2 = h;
1308  uint8_t *dst2 = dst;
1309  while (h2--) {
1310  memcpy(dst2, src, w);
1311  dst2 += dst_stride;
1312  src += src_stride;
1313  }
1314  }
1315  if ((s->width & 15) || (s->height & (vpad-1))) {
1316  s->mpvencdsp.draw_edges(dst, dst_stride,
1317  w, h,
1318  16 >> h_shift,
1319  vpad >> v_shift,
1320  EDGE_BOTTOM);
1321  }
1322  }
1323  emms_c();
1324  }
1325  }
1326  ret = av_frame_copy_props(pic->f, pic_arg);
1327  if (ret < 0)
1328  return ret;
1329 
1330  pic->f->display_picture_number = display_picture_number;
1331  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1332  } else {
1333  /* Flushing: When we have not received enough input frames,
1334  * ensure s->input_picture[0] contains the first picture */
1335  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1336  if (s->input_picture[flush_offset])
1337  break;
1338 
1339  if (flush_offset <= 1)
1340  flush_offset = 1;
1341  else
1342  encoding_delay = encoding_delay - flush_offset + 1;
1343  }
1344 
1345  /* shift buffer entries */
1346  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1347  s->input_picture[i - flush_offset] = s->input_picture[i];
1348 
1349  s->input_picture[encoding_delay] = (Picture*) pic;
1350 
1351  return 0;
1352 }
1353 
/* skip_check() — the signature line was lost in the extraction; presumably
 * static int skip_check(MpegEncContext *s, Picture *p, Picture *ref) — TODO
 * confirm against upstream. Compares candidate picture 'p' against 'ref'
 * per 8x8 block on all three planes, folds the per-block comparison values
 * into a score whose metric depends on |frame_skip_exp| (max / L1 / L2 /
 * cubes / fourth powers), and returns 1 when the frame is similar enough to
 * be dropped (encoded as skipped) under the frame-skip threshold/factor. */
1355 {
1356  int x, y, plane;
1357  int score = 0;
1358  int64_t score64 = 0;
1359 
1360  for (plane = 0; plane < 3; plane++) {
1361  const int stride = p->f->linesize[plane];
/* Luma is 16x16 per MB (2x2 blocks of 8), chroma 8x8 (1 block). */
1362  const int bw = plane ? 1 : 2;
1363  for (y = 0; y < s->mb_height * bw; y++) {
1364  for (x = 0; x < s->mb_width * bw; x++) {
1365  int off = p->shared ? 0 : 16;
1366  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1367  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1368  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1369 
1370  switch (FFABS(s->frame_skip_exp)) {
1371  case 0: score = FFMAX(score, v); break;
1372  case 1: score += FFABS(v); break;
1373  case 2: score64 += v * (int64_t)v; break;
1374  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1375  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1376  }
1377  }
1378  }
1379  }
1380  emms_c();
1381 
1382  if (score)
1383  score64 = score;
/* Negative exponent: normalize per-MB and take the inverse power. */
1384  if (s->frame_skip_exp < 0)
1385  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1386  -1.0/s->frame_skip_exp);
1387 
1388  if (score64 < s->frame_skip_threshold)
1389  return 1;
1390  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1391  return 1;
1392  return 0;
1393 }
1394 
/* encode_frame() — the signature line was lost in the extraction; presumably
 * static int encode_frame(AVCodecContext *c, AVFrame *frame). Encodes one
 * frame with the auxiliary context 'c' and returns the compressed size in
 * bytes (or a negative AVERROR). Helper for estimate_best_b_count(). */
1396 {
1397  AVPacket pkt = { 0 };
1398  int ret, got_output;
1399 
1400  av_init_packet(&pkt);
1401  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1402  if (ret < 0)
1403  return ret;
1404 
/* Only the size is needed; release the packet payload immediately. */
1405  ret = pkt.size;
1406  av_packet_unref(&pkt);
1407  return ret;
1408 }
1409 
/* estimate_best_b_count() — the signature (static int
 * estimate_best_b_count(MpegEncContext *s)) and the setup lines allocating
 * the auxiliary codec/context ('codec', 'c') were lost in the extraction.
 * b_frame_strategy == 2: brute-force search — re-encode downscaled
 * (by brd_scale) copies of the queued input pictures with every candidate
 * B-frame count 0..max_b_frames and return the count with the lowest
 * rate-distortion cost (bits * lambda2 plus reconstruction error). */
1411 {
1414  const int scale = s->brd_scale;
1415  int i, j, out_size, p_lambda, b_lambda, lambda2;
1416  int64_t best_rd = INT64_MAX;
1417  int best_b_count = -1;
1418 
1419  if (!c)
1420  return AVERROR(ENOMEM);
1421  av_assert0(scale >= 0 && scale <= 3);
1422 
1423  //emms_c();
1424  //s->next_picture_ptr->quality;
1425  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1426  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1427  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1428  if (!b_lambda) // FIXME we should do this somewhere else
1429  b_lambda = p_lambda;
1430  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1432 
/* Configure the auxiliary encoder to mirror the main one, at reduced size. */
1433  c->width = s->width >> scale;
1434  c->height = s->height >> scale;
1436  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1437  c->mb_decision = s->avctx->mb_decision;
1438  c->me_cmp = s->avctx->me_cmp;
1439  c->mb_cmp = s->avctx->mb_cmp;
1440  c->me_sub_cmp = s->avctx->me_sub_cmp;
1442  c->time_base = s->avctx->time_base;
1443  c->max_b_frames = s->max_b_frames;
1444 
1445  if (avcodec_open2(c, codec, NULL) < 0)
1446  return -1;
1447 
/* Downscale the reference plus all queued pictures into tmp_frames[]. */
1448  for (i = 0; i < s->max_b_frames + 2; i++) {
1449  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1450  s->next_picture_ptr;
1451  uint8_t *data[4];
1452 
1453  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1454  pre_input = *pre_input_ptr;
1455  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1456 
1457  if (!pre_input.shared && i) {
1458  data[0] += INPLACE_OFFSET;
1459  data[1] += INPLACE_OFFSET;
1460  data[2] += INPLACE_OFFSET;
1461  }
1462 
1463  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1464  s->tmp_frames[i]->linesize[0],
1465  data[0],
1466  pre_input.f->linesize[0],
1467  c->width, c->height);
1468  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1469  s->tmp_frames[i]->linesize[1],
1470  data[1],
1471  pre_input.f->linesize[1],
1472  c->width >> 1, c->height >> 1);
1473  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1474  s->tmp_frames[i]->linesize[2],
1475  data[2],
1476  pre_input.f->linesize[2],
1477  c->width >> 1, c->height >> 1);
1478  }
1479  }
1480 
/* Try each candidate B-frame run length 'j' and accumulate its RD cost. */
1481  for (j = 0; j < s->max_b_frames + 1; j++) {
1482  int64_t rd = 0;
1483 
1484  if (!s->input_picture[j])
1485  break;
1486 
1487  c->error[0] = c->error[1] = c->error[2] = 0;
1488 
1490  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1491 
1492  out_size = encode_frame(c, s->tmp_frames[0]);
1493 
1494  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1495 
1496  for (i = 0; i < s->max_b_frames + 1; i++) {
1497  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1498 
1499  s->tmp_frames[i + 1]->pict_type = is_p ?
1501  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1502 
1503  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1504 
1505  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1506  }
1507 
1508  /* get the delayed frames */
1509  while (out_size) {
1510  out_size = encode_frame(c, NULL);
1511  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1512  }
1513 
1514  rd += c->error[0] + c->error[1] + c->error[2];
1515 
1516  if (rd < best_rd) {
1517  best_rd = rd;
1518  best_b_count = j;
1519  }
1520  }
1521 
1523 
1524  return best_b_count;
1525 }
1526 
/* select_input_picture() — the signature (static int
 * select_input_picture(MpegEncContext *s)) was lost in the extraction,
 * along with several interior lines (e.g. the reordered_input_picture
 * shift in the first loop and some assignment lines below).
 * Chooses the next picture to code and its type (I/P/B): applies the
 * frame-skip check, the selected b_frame_strategy (0 = fixed run,
 * 1 = intra-count heuristic, 2 = brute-force RD search), enforces GOP
 * boundaries and closed-GOP rules, then reorders the queue into
 * s->reordered_input_picture[] and sets up s->new_picture /
 * s->current_picture (copying shared input when it may not be modified). */
1528 {
1529  int i, ret;
1530 
1531  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1533  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1534 
1535  /* set next picture type & ordering */
1536  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1537  if (s->frame_skip_threshold || s->frame_skip_factor) {
1538  if (s->picture_in_gop_number < s->gop_size &&
1539  s->next_picture_ptr &&
1540  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1541  // FIXME check that the gop check above is +-1 correct
1542  av_frame_unref(s->input_picture[0]->f);
1543 
1544  ff_vbv_update(s, 0);
1545 
1546  goto no_output_pic;
1547  }
1548  }
1549 
1550  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1551  !s->next_picture_ptr || s->intra_only) {
1552  s->reordered_input_picture[0] = s->input_picture[0];
1555  s->coded_picture_number++;
1556  } else {
1557  int b_frames = 0;
1558 
1559  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
/* Two-pass: take the picture types recorded by the first pass. */
1560  for (i = 0; i < s->max_b_frames + 1; i++) {
1561  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1562 
1563  if (pict_num >= s->rc_context.num_entries)
1564  break;
1565  if (!s->input_picture[i]) {
1566  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1567  break;
1568  }
1569 
1570  s->input_picture[i]->f->pict_type =
1571  s->rc_context.entry[pict_num].new_pict_type;
1572  }
1573  }
1574 
1575  if (s->b_frame_strategy == 0) {
1576  b_frames = s->max_b_frames;
1577  while (b_frames && !s->input_picture[b_frames])
1578  b_frames--;
1579  } else if (s->b_frame_strategy == 1) {
1580  for (i = 1; i < s->max_b_frames + 1; i++) {
1581  if (s->input_picture[i] &&
1582  s->input_picture[i]->b_frame_score == 0) {
1583  s->input_picture[i]->b_frame_score =
1584  get_intra_count(s,
1585  s->input_picture[i ]->f->data[0],
1586  s->input_picture[i - 1]->f->data[0],
1587  s->linesize) + 1;
1588  }
1589  }
1590  for (i = 0; i < s->max_b_frames + 1; i++) {
1591  if (!s->input_picture[i] ||
1592  s->input_picture[i]->b_frame_score - 1 >
1593  s->mb_num / s->b_sensitivity)
1594  break;
1595  }
1596 
1597  b_frames = FFMAX(0, i - 1);
1598 
1599  /* reset scores */
1600  for (i = 0; i < b_frames + 1; i++) {
1601  s->input_picture[i]->b_frame_score = 0;
1602  }
1603  } else if (s->b_frame_strategy == 2) {
1604  b_frames = estimate_best_b_count(s);
1605  }
1606 
1607  emms_c();
1608 
/* Honor caller-forced picture types inside the candidate B run. */
1609  for (i = b_frames - 1; i >= 0; i--) {
1610  int type = s->input_picture[i]->f->pict_type;
1611  if (type && type != AV_PICTURE_TYPE_B)
1612  b_frames = i;
1613  }
1614  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1615  b_frames == s->max_b_frames) {
1617  "warning, too many B-frames in a row\n");
1618  }
1619 
1620  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1621  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1622  s->gop_size > s->picture_in_gop_number) {
1623  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1624  } else {
1626  b_frames = 0;
1627  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1628  }
1629  }
1630 
1631  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1632  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1633  b_frames--;
1634 
1635  s->reordered_input_picture[0] = s->input_picture[b_frames];
1639  s->coded_picture_number++;
1640  for (i = 0; i < b_frames; i++) {
1641  s->reordered_input_picture[i + 1] = s->input_picture[i];
1642  s->reordered_input_picture[i + 1]->f->pict_type =
1645  s->coded_picture_number++;
1646  }
1647  }
1648  }
1649 no_output_pic:
1651 
1652  if (s->reordered_input_picture[0]) {
1655  AV_PICTURE_TYPE_B ? 3 : 0;
1656 
1657  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1658  return ret;
1659 
1660  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1661  // input is a shared pix, so we can't modify it -> allocate a new
1662  // one & ensure that the shared one is reuseable
1663 
1664  Picture *pic;
1665  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1666  if (i < 0)
1667  return i;
1668  pic = &s->picture[i];
1669 
1671  if (alloc_picture(s, pic, 0) < 0) {
1672  return -1;
1673  }
1674 
1675  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1676  if (ret < 0)
1677  return ret;
1678 
1679  /* mark us unused / free shared pic */
1681  s->reordered_input_picture[0]->shared = 0;
1682 
1683  s->current_picture_ptr = pic;
1684  } else {
1685  // input is not a shared pix -> reuse buffer for current_pix
1687  for (i = 0; i < 4; i++) {
1688  s->new_picture.f->data[i] += INPLACE_OFFSET;
1689  }
1690  }
1692  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1693  s->current_picture_ptr)) < 0)
1694  return ret;
1695 
1697  }
1698  return 0;
1699 }
1700 
/* Post-encode bookkeeping for the current picture: pads the edges of the
 * reconstructed frame for unrestricted motion vectors and records the
 * picture type for the next frame's decisions.
 * NOTE(review): the draw_edges() call lines themselves (and the pixdesc
 * lookup feeding hshift/vshift) were dropped by the extraction — only their
 * argument lines remain below. */
1701 static void frame_end(MpegEncContext *s)
1702 {
1703  if (s->unrestricted_mv &&
1705  !s->intra_only) {
1707  int hshift = desc->log2_chroma_w;
1708  int vshift = desc->log2_chroma_h;
/* Luma plane edge padding (call line missing). */
1710  s->current_picture.f->linesize[0],
1711  s->h_edge_pos, s->v_edge_pos,
1713  EDGE_TOP | EDGE_BOTTOM);
/* Chroma plane 1 edge padding (call line missing). */
1715  s->current_picture.f->linesize[1],
1716  s->h_edge_pos >> hshift,
1717  s->v_edge_pos >> vshift,
1718  EDGE_WIDTH >> hshift,
1719  EDGE_WIDTH >> vshift,
1720  EDGE_TOP | EDGE_BOTTOM);
/* Chroma plane 2 edge padding (call line missing). */
1722  s->current_picture.f->linesize[2],
1723  s->h_edge_pos >> hshift,
1724  s->v_edge_pos >> vshift,
1725  EDGE_WIDTH >> hshift,
1726  EDGE_WIDTH >> vshift,
1727  EDGE_TOP | EDGE_BOTTOM);
1728  }
1729 
1730  emms_c();
1731 
1732  s->last_pict_type = s->pict_type;
1734  if (s->pict_type!= AV_PICTURE_TYPE_B)
1736 
1737 #if FF_API_CODED_FRAME
1742 #endif
1743 #if FF_API_ERROR_FRAME
1746  sizeof(s->current_picture.encoding_error));
1748 #endif
1749 }
1750 
/* update_noise_reduction() — the signature (static void
 * update_noise_reduction(MpegEncContext *s)) was lost in the extraction.
 * Refreshes the per-coefficient noise-reduction offsets from the
 * accumulated DCT error statistics, separately for intra and inter blocks,
 * halving the accumulators once more than 2^16 samples were collected so
 * the statistics keep adapting. */
1752 {
1753  int intra, i;
1754 
1755  for (intra = 0; intra < 2; intra++) {
1756  if (s->dct_count[intra] > (1 << 16)) {
1757  for (i = 0; i < 64; i++) {
1758  s->dct_error_sum[intra][i] >>= 1;
1759  }
1760  s->dct_count[intra] >>= 1;
1761  }
1762 
1763  for (i = 0; i < 64; i++) {
/* Offset proportional to noise_reduction / measured per-coeff error. */
1764  s->dct_offset[intra][i] = (s->noise_reduction *
1765  s->dct_count[intra] +
1766  s->dct_error_sum[intra][i] / 2) /
1767  (s->dct_error_sum[intra][i] + 1);
1768  }
1769  }
1770 }
1771 
/* frame_start() — the signature (static int frame_start(MpegEncContext *s))
 * and several interior call lines were lost in the extraction. Rotates the
 * last/next/current picture references for the upcoming encode, adjusts
 * data pointers and doubles the linesizes for field pictures, and selects
 * the (un)quantizer functions matching the target bitstream format.
 * Returns 0 on success or a negative AVERROR. */
1773 {
1774  int ret;
1775 
1776  /* mark & release old frames */
1777  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1779  s->last_picture_ptr->f->buf[0]) {
1781  }
1782 
1785 
1787  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1788  s->current_picture_ptr)) < 0)
1789  return ret;
1790 
/* Reference rotation for non-B pictures (assignment lines missing). */
1791  if (s->pict_type != AV_PICTURE_TYPE_B) {
1793  if (!s->droppable)
1795  }
1796 
1797  if (s->last_picture_ptr) {
1799  if (s->last_picture_ptr->f->buf[0] &&
1800  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1801  s->last_picture_ptr)) < 0)
1802  return ret;
1803  }
1804  if (s->next_picture_ptr) {
1806  if (s->next_picture_ptr->f->buf[0] &&
1807  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1808  s->next_picture_ptr)) < 0)
1809  return ret;
1810  }
1811 
/* Field pictures: shift to the selected field and double the strides. */
1812  if (s->picture_structure!= PICT_FRAME) {
1813  int i;
1814  for (i = 0; i < 4; i++) {
1816  s->current_picture.f->data[i] +=
1817  s->current_picture.f->linesize[i];
1818  }
1819  s->current_picture.f->linesize[i] *= 2;
1820  s->last_picture.f->linesize[i] *= 2;
1821  s->next_picture.f->linesize[i] *= 2;
1822  }
1823  }
1824 
/* Pick (de)quantizer implementations per output format (call lines missing). */
1825  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1828  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1831  } else {
1834  }
1835 
1836  if (s->dct_error_sum) {
1839  }
1840 
1841  return 0;
1842 }
1843 
/* ff_mpv_encode_picture() — the first signature line
 * (int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, ...))
 * and a number of interior lines were lost in the extraction.
 * Main per-frame entry point: queues/reorders input, encodes the selected
 * picture (with a VBV retry loop that raises lambda and re-encodes on
 * buffer overflow), appends codec-specific stuffing, updates the MPEG-1/2
 * vbv_delay field for CBR streams, and fills the output packet with
 * pts/dts and flags. Returns 0 on success, negative on error. */
1845  const AVFrame *pic_arg, int *got_packet)
1846 {
1847  MpegEncContext *s = avctx->priv_data;
1848  int i, stuffing_count, ret;
1849  int context_count = s->slice_context_count;
1850 
1851  s->vbv_ignore_qmax = 0;
1852 
1853  s->picture_in_gop_number++;
1854 
1855  if (load_input_picture(s, pic_arg) < 0)
1856  return -1;
1857 
1858  if (select_input_picture(s) < 0) {
1859  return -1;
1860  }
1861 
1862  /* output? */
1863  if (s->new_picture.f->data[0]) {
1864  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1865  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1866  :
1867  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1868  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1869  return ret;
1870  if (s->mb_info) {
1873  s->mb_width*s->mb_height*12);
1874  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1875  }
1876 
/* Partition the output buffer between slice-thread contexts by MB rows. */
1877  for (i = 0; i < context_count; i++) {
1878  int start_y = s->thread_context[i]->start_mb_y;
1879  int end_y = s->thread_context[i]-> end_mb_y;
1880  int h = s->mb_height;
1881  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1882  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1883 
1884  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1885  }
1886 
1887  s->pict_type = s->new_picture.f->pict_type;
1888  //emms_c();
1889  ret = frame_start(s);
1890  if (ret < 0)
1891  return ret;
1892 vbv_retry:
1893  ret = encode_picture(s, s->picture_number);
1894  if (growing_buffer) {
1895  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1896  pkt->data = s->pb.buf;
1897  pkt->size = avctx->internal->byte_buffer_size;
1898  }
1899  if (ret < 0)
1900  return -1;
1901 
1902 #if FF_API_STAT_BITS
1904  avctx->header_bits = s->header_bits;
1905  avctx->mv_bits = s->mv_bits;
1906  avctx->misc_bits = s->misc_bits;
1907  avctx->i_tex_bits = s->i_tex_bits;
1908  avctx->p_tex_bits = s->p_tex_bits;
1909  avctx->i_count = s->i_count;
1910  // FIXME f/b_count in avctx
1911  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1912  avctx->skip_count = s->skip_count;
1914 #endif
1915 
1916  frame_end(s);
1917 
1918  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1920 
/* VBV check: if the frame exceeds the allowed buffer share, raise lambda
 * (and the per-MB lambda table), undo per-frame state, and re-encode. */
1921  if (avctx->rc_buffer_size) {
1922  RateControlContext *rcc = &s->rc_context;
1923  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1924  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1925  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1926 
1927  if (put_bits_count(&s->pb) > max_size &&
1928  s->lambda < s->lmax) {
1929  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1930  (s->qscale + 1) / s->qscale);
1931  if (s->adaptive_quant) {
1932  int i;
1933  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1934  s->lambda_table[i] =
1935  FFMAX(s->lambda_table[i] + min_step,
1936  s->lambda_table[i] * (s->qscale + 1) /
1937  s->qscale);
1938  }
1939  s->mb_skipped = 0; // done in frame_start()
1940  // done in encode_picture() so we must undo it
1941  if (s->pict_type == AV_PICTURE_TYPE_P) {
1942  if (s->flipflop_rounding ||
1943  s->codec_id == AV_CODEC_ID_H263P ||
1945  s->no_rounding ^= 1;
1946  }
1947  if (s->pict_type != AV_PICTURE_TYPE_B) {
1948  s->time_base = s->last_time_base;
1949  s->last_non_b_time = s->time - s->pp_time;
1950  }
1951  for (i = 0; i < context_count; i++) {
1952  PutBitContext *pb = &s->thread_context[i]->pb;
1953  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1954  }
1955  s->vbv_ignore_qmax = 1;
1956  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1957  goto vbv_retry;
1958  }
1959 
1961  }
1962 
1963  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1965 
1966  for (i = 0; i < 4; i++) {
1968  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1969  }
1972  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1973  s->pict_type);
1974 
1975  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1976  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1977  s->misc_bits + s->i_tex_bits +
1978  s->p_tex_bits);
1979  flush_put_bits(&s->pb);
1980  s->frame_bits = put_bits_count(&s->pb);
1981 
1982  stuffing_count = ff_vbv_update(s, s->frame_bits);
1983  s->stuffing_bits = 8*stuffing_count;
1984  if (stuffing_count) {
1985  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1986  stuffing_count + 50) {
1987  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1988  return -1;
1989  }
1990 
/* Codec-specific stuffing patterns (MPEG-1/2 zero bytes vs MPEG-4
 * stuffing start code; case labels partially lost in extraction). */
1991  switch (s->codec_id) {
1994  while (stuffing_count--) {
1995  put_bits(&s->pb, 8, 0);
1996  }
1997  break;
1998  case AV_CODEC_ID_MPEG4:
1999  put_bits(&s->pb, 16, 0);
2000  put_bits(&s->pb, 16, 0x1C3);
2001  stuffing_count -= 4;
2002  while (stuffing_count--) {
2003  put_bits(&s->pb, 8, 0xFF);
2004  }
2005  break;
2006  default:
2007  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2008  }
2009  flush_put_bits(&s->pb);
2010  s->frame_bits = put_bits_count(&s->pb);
2011  }
2012 
2013  /* update MPEG-1/2 vbv_delay for CBR */
2014  if (s->avctx->rc_max_rate &&
2015  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2016  s->out_format == FMT_MPEG1 &&
2017  90000LL * (avctx->rc_buffer_size - 1) <=
2018  s->avctx->rc_max_rate * 0xFFFFLL) {
2019  AVCPBProperties *props;
2020  size_t props_size;
2021 
2022  int vbv_delay, min_delay;
2023  double inbits = s->avctx->rc_max_rate *
2024  av_q2d(s->avctx->time_base);
2025  int minbits = s->frame_bits - 8 *
2026  (s->vbv_delay_ptr - s->pb.buf - 1);
2027  double bits = s->rc_context.buffer_index + minbits - inbits;
2028 
2029  if (bits < 0)
2031  "Internal error, negative bits\n");
2032 
2033  assert(s->repeat_first_field == 0);
2034 
2035  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2036  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2037  s->avctx->rc_max_rate;
2038 
2039  vbv_delay = FFMAX(vbv_delay, min_delay);
2040 
2041  av_assert0(vbv_delay < 0xFFFF);
2042 
/* Patch the 16-bit vbv_delay directly into the already-written header. */
2043  s->vbv_delay_ptr[0] &= 0xF8;
2044  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2045  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2046  s->vbv_delay_ptr[2] &= 0x07;
2047  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2048 
2049  props = av_cpb_properties_alloc(&props_size);
2050  if (!props)
2051  return AVERROR(ENOMEM);
2052  props->vbv_delay = vbv_delay * 300;
2053 
2055  (uint8_t*)props, props_size);
2056  if (ret < 0) {
2057  av_freep(&props);
2058  return ret;
2059  }
2060 
2061 #if FF_API_VBV_DELAY
2063  avctx->vbv_delay = vbv_delay * 300;
2065 #endif
2066  }
2067  s->total_bits += s->frame_bits;
2068 #if FF_API_STAT_BITS
2070  avctx->frame_bits = s->frame_bits;
2072 #endif
2073 
2074 
2075  pkt->pts = s->current_picture.f->pts;
2076  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2078  pkt->dts = pkt->pts - s->dts_delta;
2079  else
2080  pkt->dts = s->reordered_pts;
2081  s->reordered_pts = pkt->pts;
2082  } else
2083  pkt->dts = pkt->pts;
2084  if (s->current_picture.f->key_frame)
2085  pkt->flags |= AV_PKT_FLAG_KEY;
2086  if (s->mb_info)
2088  } else {
2089  s->frame_bits = 0;
2090  }
2091 
2092  /* release non-reference frames */
2093  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2094  if (!s->picture[i].reference)
2095  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2096  }
2097 
2098  av_assert1((s->frame_bits & 7) == 0);
2099 
2100  pkt->size = s->frame_bits / 8;
2101  *got_packet = !!pkt->size;
2102  return 0;
2103 }
2104 
/* dct_single_coeff_elimination() — the first signature line (static void
 * dct_single_coeff_elimination(MpegEncContext *s, ...)) was lost in the
 * extraction. Zeroes block 'n' when its only nonzero coefficients are
 * isolated ±1 values judged not worth coding: each ±1 is scored by the
 * zero-run preceding it via 'tab', any |level| > 1 aborts, and the block
 * is cleared when the total score stays below 'threshold'. A negative
 * threshold additionally allows eliminating the DC coefficient. */
2106  int n, int threshold)
2107 {
2108  static const char tab[64] = {
2109  3, 2, 2, 1, 1, 1, 1, 1,
2110  1, 1, 1, 1, 1, 1, 1, 1,
2111  1, 1, 1, 1, 1, 1, 1, 1,
2112  0, 0, 0, 0, 0, 0, 0, 0,
2113  0, 0, 0, 0, 0, 0, 0, 0,
2114  0, 0, 0, 0, 0, 0, 0, 0,
2115  0, 0, 0, 0, 0, 0, 0, 0,
2116  0, 0, 0, 0, 0, 0, 0, 0
2117  };
2118  int score = 0;
2119  int run = 0;
2120  int i;
2121  int16_t *block = s->block[n];
2122  const int last_index = s->block_last_index[n];
2123  int skip_dc;
2124 
2125  if (threshold < 0) {
2126  skip_dc = 0;
2127  threshold = -threshold;
2128  } else
2129  skip_dc = 1;
2130 
2131  /* Are all we could set to zero already zero? */
2132  if (last_index <= skip_dc - 1)
2133  return;
2134 
2135  for (i = 0; i <= last_index; i++) {
2136  const int j = s->intra_scantable.permutated[i];
2137  const int level = FFABS(block[j]);
2138  if (level == 1) {
2139  if (skip_dc && i == 0)
2140  continue;
2141  score += tab[run];
2142  run = 0;
2143  } else if (level > 1) {
/* Any coefficient larger than 1 means the block is worth keeping. */
2144  return;
2145  } else {
2146  run++;
2147  }
2148  }
2149  if (score >= threshold)
2150  return;
2151  for (i = skip_dc; i <= last_index; i++) {
2152  const int j = s->intra_scantable.permutated[i];
2153  block[j] = 0;
2154  }
/* If DC survived, keep last_index at 0; otherwise mark block empty. */
2155  if (block[0])
2156  s->block_last_index[n] = 0;
2157  else
2158  s->block_last_index[n] = -1;
2159 }
2160 
2161 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2162  int last_index)
2163 {
2164  int i;
2165  const int maxlevel = s->max_qcoeff;
2166  const int minlevel = s->min_qcoeff;
2167  int overflow = 0;
2168 
2169  if (s->mb_intra) {
2170  i = 1; // skip clipping of intra dc
2171  } else
2172  i = 0;
2173 
2174  for (; i <= last_index; i++) {
2175  const int j = s->intra_scantable.permutated[i];
2176  int level = block[j];
2177 
2178  if (level > maxlevel) {
2179  level = maxlevel;
2180  overflow++;
2181  } else if (level < minlevel) {
2182  level = minlevel;
2183  overflow++;
2184  }
2185 
2186  block[j] = level;
2187  }
2188 
2189  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2190  av_log(s->avctx, AV_LOG_INFO,
2191  "warning, clipping %d dct coefficients to %d..%d\n",
2192  overflow, minlevel, maxlevel);
2193 }
2194 
2195 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2196 {
/* Fills weight[0..63] for an 8x8 pixel block: each entry is derived from
 * the local deviation over the pixel's 3x3 neighbourhood (clipped to the
 * block), via 36 * ff_sqrt(count*sqr - sum*sum) / count — larger in busy
 * areas, smaller in flat ones. Used for visually-weighted quantization. */
2197  int x, y;
2198  // FIXME optimize
2199  for (y = 0; y < 8; y++) {
2200  for (x = 0; x < 8; x++) {
2201  int x2, y2;
2202  int sum = 0;
2203  int sqr = 0;
2204  int count = 0;
2205 
/* Accumulate sum / sum-of-squares over the clipped 3x3 window. */
2206  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2207  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2208  int v = ptr[x2 + y2 * stride];
2209  sum += v;
2210  sqr += v * v;
2211  count++;
2212  }
2213  }
2214  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2215  }
2216  }
2217 }
2218 
2220  int motion_x, int motion_y,
2221  int mb_block_height,
2222  int mb_block_width,
2223  int mb_block_count)
2224 {
2225  int16_t weight[12][64];
2226  int16_t orig[12][64];
2227  const int mb_x = s->mb_x;
2228  const int mb_y = s->mb_y;
2229  int i;
2230  int skip_dct[12];
2231  int dct_offset = s->linesize * 8; // default for progressive frames
2232  int uv_dct_offset = s->uvlinesize * 8;
2233  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2234  ptrdiff_t wrap_y, wrap_c;
2235 
2236  for (i = 0; i < mb_block_count; i++)
2237  skip_dct[i] = s->skipdct;
2238 
2239  if (s->adaptive_quant) {
2240  const int last_qp = s->qscale;
2241  const int mb_xy = mb_x + mb_y * s->mb_stride;
2242 
2243  s->lambda = s->lambda_table[mb_xy];
2244  update_qscale(s);
2245 
2246  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2247  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2248  s->dquant = s->qscale - last_qp;
2249 
2250  if (s->out_format == FMT_H263) {
2251  s->dquant = av_clip(s->dquant, -2, 2);
2252 
2253  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2254  if (!s->mb_intra) {
2255  if (s->pict_type == AV_PICTURE_TYPE_B) {
2256  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2257  s->dquant = 0;
2258  }
2259  if (s->mv_type == MV_TYPE_8X8)
2260  s->dquant = 0;
2261  }
2262  }
2263  }
2264  }
2265  ff_set_qscale(s, last_qp + s->dquant);
2266  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2267  ff_set_qscale(s, s->qscale + s->dquant);
2268 
2269  wrap_y = s->linesize;
2270  wrap_c = s->uvlinesize;
2271  ptr_y = s->new_picture.f->data[0] +
2272  (mb_y * 16 * wrap_y) + mb_x * 16;
2273  ptr_cb = s->new_picture.f->data[1] +
2274  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2275  ptr_cr = s->new_picture.f->data[2] +
2276  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2277 
2278  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2279  uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2280  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2281  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2282  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2283  wrap_y, wrap_y,
2284  16, 16, mb_x * 16, mb_y * 16,
2285  s->width, s->height);
2286  ptr_y = ebuf;
2287  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2288  wrap_c, wrap_c,
2289  mb_block_width, mb_block_height,
2290  mb_x * mb_block_width, mb_y * mb_block_height,
2291  cw, ch);
2292  ptr_cb = ebuf + 16 * wrap_y;
2293  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2294  wrap_c, wrap_c,
2295  mb_block_width, mb_block_height,
2296  mb_x * mb_block_width, mb_y * mb_block_height,
2297  cw, ch);
2298  ptr_cr = ebuf + 16 * wrap_y + 16;
2299  }
2300 
2301  if (s->mb_intra) {
2303  int progressive_score, interlaced_score;
2304 
2305  s->interlaced_dct = 0;
2306  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2307  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2308  NULL, wrap_y, 8) - 400;
2309 
2310  if (progressive_score > 0) {
2311  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2312  NULL, wrap_y * 2, 8) +
2313  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2314  NULL, wrap_y * 2, 8);
2315  if (progressive_score > interlaced_score) {
2316  s->interlaced_dct = 1;
2317 
2318  dct_offset = wrap_y;
2319  uv_dct_offset = wrap_c;
2320  wrap_y <<= 1;
2321  if (s->chroma_format == CHROMA_422 ||
2322  s->chroma_format == CHROMA_444)
2323  wrap_c <<= 1;
2324  }
2325  }
2326  }
2327 
2328  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2329  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2330  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2331  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2332 
2333  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2334  skip_dct[4] = 1;
2335  skip_dct[5] = 1;
2336  } else {
2337  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2338  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2339  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2340  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2341  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2342  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2343  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2344  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2345  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2346  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2347  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2348  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2349  }
2350  }
2351  } else {
2352  op_pixels_func (*op_pix)[4];
2353  qpel_mc_func (*op_qpix)[16];
2354  uint8_t *dest_y, *dest_cb, *dest_cr;
2355 
2356  dest_y = s->dest[0];
2357  dest_cb = s->dest[1];
2358  dest_cr = s->dest[2];
2359 
2360  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2361  op_pix = s->hdsp.put_pixels_tab;
2362  op_qpix = s->qdsp.put_qpel_pixels_tab;
2363  } else {
2364  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2365  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2366  }
2367 
2368  if (s->mv_dir & MV_DIR_FORWARD) {
2369  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2370  s->last_picture.f->data,
2371  op_pix, op_qpix);
2372  op_pix = s->hdsp.avg_pixels_tab;
2373  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2374  }
2375  if (s->mv_dir & MV_DIR_BACKWARD) {
2376  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2377  s->next_picture.f->data,
2378  op_pix, op_qpix);
2379  }
2380 
2382  int progressive_score, interlaced_score;
2383 
2384  s->interlaced_dct = 0;
2385  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2386  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2387  ptr_y + wrap_y * 8,
2388  wrap_y, 8) - 400;
2389 
2390  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2391  progressive_score -= 400;
2392 
2393  if (progressive_score > 0) {
2394  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2395  wrap_y * 2, 8) +
2396  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2397  ptr_y + wrap_y,
2398  wrap_y * 2, 8);
2399 
2400  if (progressive_score > interlaced_score) {
2401  s->interlaced_dct = 1;
2402 
2403  dct_offset = wrap_y;
2404  uv_dct_offset = wrap_c;
2405  wrap_y <<= 1;
2406  if (s->chroma_format == CHROMA_422)
2407  wrap_c <<= 1;
2408  }
2409  }
2410  }
2411 
2412  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2413  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2414  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2415  dest_y + dct_offset, wrap_y);
2416  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2417  dest_y + dct_offset + 8, wrap_y);
2418 
2419  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2420  skip_dct[4] = 1;
2421  skip_dct[5] = 1;
2422  } else {
2423  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2424  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2425  if (!s->chroma_y_shift) { /* 422 */
2426  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2427  dest_cb + uv_dct_offset, wrap_c);
2428  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2429  dest_cr + uv_dct_offset, wrap_c);
2430  }
2431  }
2432  /* pre quantization */
2433  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2434  2 * s->qscale * s->qscale) {
2435  // FIXME optimize
2436  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2437  skip_dct[0] = 1;
2438  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2439  skip_dct[1] = 1;
2440  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2441  wrap_y, 8) < 20 * s->qscale)
2442  skip_dct[2] = 1;
2443  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2444  wrap_y, 8) < 20 * s->qscale)
2445  skip_dct[3] = 1;
2446  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2447  skip_dct[4] = 1;
2448  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2449  skip_dct[5] = 1;
2450  if (!s->chroma_y_shift) { /* 422 */
2451  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2452  dest_cb + uv_dct_offset,
2453  wrap_c, 8) < 20 * s->qscale)
2454  skip_dct[6] = 1;
2455  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2456  dest_cr + uv_dct_offset,
2457  wrap_c, 8) < 20 * s->qscale)
2458  skip_dct[7] = 1;
2459  }
2460  }
2461  }
2462 
2463  if (s->quantizer_noise_shaping) {
2464  if (!skip_dct[0])
2465  get_visual_weight(weight[0], ptr_y , wrap_y);
2466  if (!skip_dct[1])
2467  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2468  if (!skip_dct[2])
2469  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2470  if (!skip_dct[3])
2471  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2472  if (!skip_dct[4])
2473  get_visual_weight(weight[4], ptr_cb , wrap_c);
2474  if (!skip_dct[5])
2475  get_visual_weight(weight[5], ptr_cr , wrap_c);
2476  if (!s->chroma_y_shift) { /* 422 */
2477  if (!skip_dct[6])
2478  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2479  wrap_c);
2480  if (!skip_dct[7])
2481  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2482  wrap_c);
2483  }
2484  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2485  }
2486 
2487  /* DCT & quantize */
2488  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2489  {
2490  for (i = 0; i < mb_block_count; i++) {
2491  if (!skip_dct[i]) {
2492  int overflow;
2493  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2494  // FIXME we could decide to change to quantizer instead of
2495  // clipping
2496  // JS: I don't think that would be a good idea it could lower
2497  // quality instead of improve it. Just INTRADC clipping
2498  // deserves changes in quantizer
2499  if (overflow)
2500  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2501  } else
2502  s->block_last_index[i] = -1;
2503  }
2504  if (s->quantizer_noise_shaping) {
2505  for (i = 0; i < mb_block_count; i++) {
2506  if (!skip_dct[i]) {
2507  s->block_last_index[i] =
2508  dct_quantize_refine(s, s->block[i], weight[i],
2509  orig[i], i, s->qscale);
2510  }
2511  }
2512  }
2513 
2514  if (s->luma_elim_threshold && !s->mb_intra)
2515  for (i = 0; i < 4; i++)
2517  if (s->chroma_elim_threshold && !s->mb_intra)
2518  for (i = 4; i < mb_block_count; i++)
2520 
2521  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2522  for (i = 0; i < mb_block_count; i++) {
2523  if (s->block_last_index[i] == -1)
2524  s->coded_score[i] = INT_MAX / 256;
2525  }
2526  }
2527  }
2528 
2529  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2530  s->block_last_index[4] =
2531  s->block_last_index[5] = 0;
2532  s->block[4][0] =
2533  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2534  if (!s->chroma_y_shift) { /* 422 / 444 */
2535  for (i=6; i<12; i++) {
2536  s->block_last_index[i] = 0;
2537  s->block[i][0] = s->block[4][0];
2538  }
2539  }
2540  }
2541 
2542  // non c quantize code returns incorrect block_last_index FIXME
2543  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2544  for (i = 0; i < mb_block_count; i++) {
2545  int j;
2546  if (s->block_last_index[i] > 0) {
2547  for (j = 63; j > 0; j--) {
2548  if (s->block[i][s->intra_scantable.permutated[j]])
2549  break;
2550  }
2551  s->block_last_index[i] = j;
2552  }
2553  }
2554  }
2555 
2556  /* huffman encode */
2557  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2560  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2561  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2562  break;
2563  case AV_CODEC_ID_MPEG4:
2564  if (CONFIG_MPEG4_ENCODER)
2565  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2566  break;
2567  case AV_CODEC_ID_MSMPEG4V2:
2568  case AV_CODEC_ID_MSMPEG4V3:
2569  case AV_CODEC_ID_WMV1:
2571  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2572  break;
2573  case AV_CODEC_ID_WMV2:
2574  if (CONFIG_WMV2_ENCODER)
2575  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2576  break;
2577  case AV_CODEC_ID_H261:
2578  if (CONFIG_H261_ENCODER)
2579  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2580  break;
2581  case AV_CODEC_ID_H263:
2582  case AV_CODEC_ID_H263P:
2583  case AV_CODEC_ID_FLV1:
2584  case AV_CODEC_ID_RV10:
2585  case AV_CODEC_ID_RV20:
2586  if (CONFIG_H263_ENCODER)
2587  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2588  break;
2589  case AV_CODEC_ID_MJPEG:
2590  case AV_CODEC_ID_AMV:
2591  if (CONFIG_MJPEG_ENCODER)
2592  ff_mjpeg_encode_mb(s, s->block);
2593  break;
2594  default:
2595  av_assert1(0);
2596  }
2597 }
2598 
2599 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2600 {
2601  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2602  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2603  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2604 }
2605 
2607  int i;
2608 
2609  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2610 
2611  /* MPEG-1 */
2612  d->mb_skip_run= s->mb_skip_run;
2613  for(i=0; i<3; i++)
2614  d->last_dc[i] = s->last_dc[i];
2615 
2616  /* statistics */
2617  d->mv_bits= s->mv_bits;
2618  d->i_tex_bits= s->i_tex_bits;
2619  d->p_tex_bits= s->p_tex_bits;
2620  d->i_count= s->i_count;
2621  d->f_count= s->f_count;
2622  d->b_count= s->b_count;
2623  d->skip_count= s->skip_count;
2624  d->misc_bits= s->misc_bits;
2625  d->last_bits= 0;
2626 
2627  d->mb_skipped= 0;
2628  d->qscale= s->qscale;
2629  d->dquant= s->dquant;
2630 
2632 }
2633 
2635  int i;
2636 
2637  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2638  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2639 
2640  /* MPEG-1 */
2641  d->mb_skip_run= s->mb_skip_run;
2642  for(i=0; i<3; i++)
2643  d->last_dc[i] = s->last_dc[i];
2644 
2645  /* statistics */
2646  d->mv_bits= s->mv_bits;
2647  d->i_tex_bits= s->i_tex_bits;
2648  d->p_tex_bits= s->p_tex_bits;
2649  d->i_count= s->i_count;
2650  d->f_count= s->f_count;
2651  d->b_count= s->b_count;
2652  d->skip_count= s->skip_count;
2653  d->misc_bits= s->misc_bits;
2654 
2655  d->mb_intra= s->mb_intra;
2656  d->mb_skipped= s->mb_skipped;
2657  d->mv_type= s->mv_type;
2658  d->mv_dir= s->mv_dir;
2659  d->pb= s->pb;
2660  if(s->data_partitioning){
2661  d->pb2= s->pb2;
2662  d->tex_pb= s->tex_pb;
2663  }
2664  d->block= s->block;
2665  for(i=0; i<8; i++)
2666  d->block_last_index[i]= s->block_last_index[i];
2668  d->qscale= s->qscale;
2669 
2671 }
2672 
2673 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2675  int *dmin, int *next_block, int motion_x, int motion_y)
2676 {
2677  int score;
2678  uint8_t *dest_backup[3];
2679 
2680  copy_context_before_encode(s, backup, type);
2681 
2682  s->block= s->blocks[*next_block];
2683  s->pb= pb[*next_block];
2684  if(s->data_partitioning){
2685  s->pb2 = pb2 [*next_block];
2686  s->tex_pb= tex_pb[*next_block];
2687  }
2688 
2689  if(*next_block){
2690  memcpy(dest_backup, s->dest, sizeof(s->dest));
2691  s->dest[0] = s->sc.rd_scratchpad;
2692  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2693  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2694  av_assert0(s->linesize >= 32); //FIXME
2695  }
2696 
2697  encode_mb(s, motion_x, motion_y);
2698 
2699  score= put_bits_count(&s->pb);
2700  if(s->data_partitioning){
2701  score+= put_bits_count(&s->pb2);
2702  score+= put_bits_count(&s->tex_pb);
2703  }
2704 
2705  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2706  ff_mpv_decode_mb(s, s->block);
2707 
2708  score *= s->lambda2;
2709  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2710  }
2711 
2712  if(*next_block){
2713  memcpy(s->dest, dest_backup, sizeof(s->dest));
2714  }
2715 
2716  if(score<*dmin){
2717  *dmin= score;
2718  *next_block^=1;
2719 
2720  copy_context_after_encode(best, s, type);
2721  }
2722 }
2723 
2724 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2725  uint32_t *sq = ff_square_tab + 256;
2726  int acc=0;
2727  int x,y;
2728 
2729  if(w==16 && h==16)
2730  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2731  else if(w==8 && h==8)
2732  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2733 
2734  for(y=0; y<h; y++){
2735  for(x=0; x<w; x++){
2736  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2737  }
2738  }
2739 
2740  av_assert2(acc>=0);
2741 
2742  return acc;
2743 }
2744 
2745 static int sse_mb(MpegEncContext *s){
2746  int w= 16;
2747  int h= 16;
2748 
2749  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2750  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2751 
2752  if(w==16 && h==16)
2753  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2754  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2755  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2756  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2757  }else{
2758  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2759  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2760  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2761  }
2762  else
2763  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2764  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2765  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2766 }
2767 
2769  MpegEncContext *s= *(void**)arg;
2770 
2771 
2772  s->me.pre_pass=1;
2773  s->me.dia_size= s->avctx->pre_dia_size;
2774  s->first_slice_line=1;
2775  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2776  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2778  }
2779  s->first_slice_line=0;
2780  }
2781 
2782  s->me.pre_pass=0;
2783 
2784  return 0;
2785 }
2786 
2788  MpegEncContext *s= *(void**)arg;
2789 
2791 
2792  s->me.dia_size= s->avctx->dia_size;
2793  s->first_slice_line=1;
2794  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2795  s->mb_x=0; //for block init below
2797  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2798  s->block_index[0]+=2;
2799  s->block_index[1]+=2;
2800  s->block_index[2]+=2;
2801  s->block_index[3]+=2;
2802 
2803  /* compute motion vector & mb_type and store in context */
2806  else
2808  }
2809  s->first_slice_line=0;
2810  }
2811  return 0;
2812 }
2813 
2814 static int mb_var_thread(AVCodecContext *c, void *arg){
2815  MpegEncContext *s= *(void**)arg;
2816  int mb_x, mb_y;
2817 
2819 
2820  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2821  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2822  int xx = mb_x * 16;
2823  int yy = mb_y * 16;
2824  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2825  int varc;
2826  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2827 
2828  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2829  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2830 
2831  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2832  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2833  s->me.mb_var_sum_temp += varc;
2834  }
2835  }
2836  return 0;
2837 }
2838 
2840  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2841  if(s->partitioned_frame){
2843  }
2844 
2845  ff_mpeg4_stuffing(&s->pb);
2846  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2848  }
2849 
2851  flush_put_bits(&s->pb);
2852 
2853  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2854  s->misc_bits+= get_bits_diff(s);
2855 }
2856 
2858 {
2859  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2860  int offset = put_bits_count(&s->pb);
2861  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2862  int gobn = s->mb_y / s->gob_index;
2863  int pred_x, pred_y;
2864  if (CONFIG_H263_ENCODER)
2865  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2866  bytestream_put_le32(&ptr, offset);
2867  bytestream_put_byte(&ptr, s->qscale);
2868  bytestream_put_byte(&ptr, gobn);
2869  bytestream_put_le16(&ptr, mba);
2870  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2871  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2872  /* 4MV not implemented */
2873  bytestream_put_byte(&ptr, 0); /* hmv2 */
2874  bytestream_put_byte(&ptr, 0); /* vmv2 */
2875 }
2876 
2877 static void update_mb_info(MpegEncContext *s, int startcode)
2878 {
2879  if (!s->mb_info)
2880  return;
2881  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2882  s->mb_info_size += 12;
2883  s->prev_mb_info = s->last_mb_info;
2884  }
2885  if (startcode) {
2886  s->prev_mb_info = put_bits_count(&s->pb)/8;
2887  /* This might have incremented mb_info_size above, and we return without
2888  * actually writing any info into that slot yet. But in that case,
2889  * this will be called again at the start of the after writing the
2890  * start code, actually writing the mb info. */
2891  return;
2892  }
2893 
2894  s->last_mb_info = put_bits_count(&s->pb)/8;
2895  if (!s->mb_info_size)
2896  s->mb_info_size += 12;
2897  write_mb_info(s);
2898 }
2899 
2900 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2901 {
2902  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2903  && s->slice_context_count == 1
2904  && s->pb.buf == s->avctx->internal->byte_buffer) {
2905  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2906  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2907 
2908  uint8_t *new_buffer = NULL;
2909  int new_buffer_size = 0;
2910 
2911  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2912  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2913  return AVERROR(ENOMEM);
2914  }
2915 
2916  emms_c();
2917 
2918  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2919  s->avctx->internal->byte_buffer_size + size_increase);
2920  if (!new_buffer)
2921  return AVERROR(ENOMEM);
2922 
2923  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2925  s->avctx->internal->byte_buffer = new_buffer;
2926  s->avctx->internal->byte_buffer_size = new_buffer_size;
2927  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2928  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2929  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2930  }
2931  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2932  return AVERROR(EINVAL);
2933  return 0;
2934 }
2935 
2936 static int encode_thread(AVCodecContext *c, void *arg){
2937  MpegEncContext *s= *(void**)arg;
2938  int mb_x, mb_y;
2939  int chr_h= 16>>s->chroma_y_shift;
2940  int i, j;
2941  MpegEncContext best_s = { 0 }, backup_s;
2942  uint8_t bit_buf[2][MAX_MB_BYTES];
2943  uint8_t bit_buf2[2][MAX_MB_BYTES];
2944  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2945  PutBitContext pb[2], pb2[2], tex_pb[2];
2946 
2948 
2949  for(i=0; i<2; i++){
2950  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2951  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2952  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2953  }
2954 
2955  s->last_bits= put_bits_count(&s->pb);
2956  s->mv_bits=0;
2957  s->misc_bits=0;
2958  s->i_tex_bits=0;
2959  s->p_tex_bits=0;
2960  s->i_count=0;
2961  s->f_count=0;
2962  s->b_count=0;
2963  s->skip_count=0;
2964 
2965  for(i=0; i<3; i++){
2966  /* init last dc values */
2967  /* note: quant matrix value (8) is implied here */
2968  s->last_dc[i] = 128 << s->intra_dc_precision;
2969 
2970  s->current_picture.encoding_error[i] = 0;
2971  }
2972  if(s->codec_id==AV_CODEC_ID_AMV){
2973  s->last_dc[0] = 128*8/13;
2974  s->last_dc[1] = 128*8/14;
2975  s->last_dc[2] = 128*8/14;
2976  }
2977  s->mb_skip_run = 0;
2978  memset(s->last_mv, 0, sizeof(s->last_mv));
2979 
2980  s->last_mv_dir = 0;
2981 
2982  switch(s->codec_id){
2983  case AV_CODEC_ID_H263:
2984  case AV_CODEC_ID_H263P:
2985  case AV_CODEC_ID_FLV1:
2986  if (CONFIG_H263_ENCODER)
2987  s->gob_index = H263_GOB_HEIGHT(s->height);
2988  break;
2989  case AV_CODEC_ID_MPEG4:
2990  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2992  break;
2993  }
2994 
2995  s->resync_mb_x=0;
2996  s->resync_mb_y=0;
2997  s->first_slice_line = 1;
2998  s->ptr_lastgob = s->pb.buf;
2999  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3000  s->mb_x=0;
3001  s->mb_y= mb_y;
3002 
3003  ff_set_qscale(s, s->qscale);
3005 
3006  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3007  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3008  int mb_type= s->mb_type[xy];
3009 // int d;
3010  int dmin= INT_MAX;
3011  int dir;
3012  int size_increase = s->avctx->internal->byte_buffer_size/4
3013  + s->mb_width*MAX_MB_BYTES;
3014 
3015  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3016  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3017  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3018  return -1;
3019  }
3020  if(s->data_partitioning){
3021  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3022  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3023  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3024  return -1;
3025  }
3026  }
3027 
3028  s->mb_x = mb_x;
3029  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3031 
3032  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3034  xy= s->mb_y*s->mb_stride + s->mb_x;
3035  mb_type= s->mb_type[xy];
3036  }
3037 
3038  /* write gob / video packet header */
3039  if(s->rtp_mode){
3040  int current_packet_size, is_gob_start;
3041 
3042  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3043 
3044  is_gob_start = s->rtp_payload_size &&
3045  current_packet_size >= s->rtp_payload_size &&
3046  mb_y + mb_x > 0;
3047 
3048  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3049 
3050  switch(s->codec_id){
3051  case AV_CODEC_ID_H263:
3052  case AV_CODEC_ID_H263P:
3053  if(!s->h263_slice_structured)
3054  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3055  break;
3057  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3059  if(s->mb_skip_run) is_gob_start=0;
3060  break;
3061  case AV_CODEC_ID_MJPEG:
3062  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3063  break;
3064  }
3065 
3066  if(is_gob_start){
3067  if(s->start_mb_y != mb_y || mb_x!=0){
3068  write_slice_end(s);
3069 
3070  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3072  }
3073  }
3074 
3075  av_assert2((put_bits_count(&s->pb)&7) == 0);
3076  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3077 
3078  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3079  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3080  int d = 100 / s->error_rate;
3081  if(r % d == 0){
3082  current_packet_size=0;
3083  s->pb.buf_ptr= s->ptr_lastgob;
3084  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3085  }
3086  }
3087 
3088 #if FF_API_RTP_CALLBACK
3090  if (s->avctx->rtp_callback){
3091  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3092  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3093  }
3095 #endif
3096  update_mb_info(s, 1);
3097 
3098  switch(s->codec_id){
3099  case AV_CODEC_ID_MPEG4:
3100  if (CONFIG_MPEG4_ENCODER) {
3103  }
3104  break;
3107  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3110  }
3111  break;
3112  case AV_CODEC_ID_H263:
3113  case AV_CODEC_ID_H263P:
3114  if (CONFIG_H263_ENCODER)
3115  ff_h263_encode_gob_header(s, mb_y);
3116  break;
3117  }
3118 
3119  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3120  int bits= put_bits_count(&s->pb);
3121  s->misc_bits+= bits - s->last_bits;
3122  s->last_bits= bits;
3123  }
3124 
3125  s->ptr_lastgob += current_packet_size;
3126  s->first_slice_line=1;
3127  s->resync_mb_x=mb_x;
3128  s->resync_mb_y=mb_y;
3129  }
3130  }
3131 
3132  if( (s->resync_mb_x == s->mb_x)
3133  && s->resync_mb_y+1 == s->mb_y){
3134  s->first_slice_line=0;
3135  }
3136 
3137  s->mb_skipped=0;
3138  s->dquant=0; //only for QP_RD
3139 
3140  update_mb_info(s, 0);
3141 
3142  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3143  int next_block=0;
3144  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3145 
3146  copy_context_before_encode(&backup_s, s, -1);
3147  backup_s.pb= s->pb;
3150  if(s->data_partitioning){
3151  backup_s.pb2= s->pb2;
3152  backup_s.tex_pb= s->tex_pb;
3153  }
3154 
3155  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3156  s->mv_dir = MV_DIR_FORWARD;
3157  s->mv_type = MV_TYPE_16X16;
3158  s->mb_intra= 0;
3159  s->mv[0][0][0] = s->p_mv_table[xy][0];
3160  s->mv[0][0][1] = s->p_mv_table[xy][1];
3161  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3162  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3163  }
3164  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3165  s->mv_dir = MV_DIR_FORWARD;
3166  s->mv_type = MV_TYPE_FIELD;
3167  s->mb_intra= 0;
3168  for(i=0; i<2; i++){
3169  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3170  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3171  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3172  }
3173  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3174  &dmin, &next_block, 0, 0);
3175  }
3176  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3177  s->mv_dir = MV_DIR_FORWARD;
3178  s->mv_type = MV_TYPE_16X16;
3179  s->mb_intra= 0;
3180  s->mv[0][0][0] = 0;
3181  s->mv[0][0][1] = 0;
3182  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3183  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3184  }
3185  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3186  s->mv_dir = MV_DIR_FORWARD;
3187  s->mv_type = MV_TYPE_8X8;
3188  s->mb_intra= 0;
3189  for(i=0; i<4; i++){
3190  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3191  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3192  }
3193  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3194  &dmin, &next_block, 0, 0);
3195  }
3196  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3197  s->mv_dir = MV_DIR_FORWARD;
3198  s->mv_type = MV_TYPE_16X16;
3199  s->mb_intra= 0;
3200  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3201  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3202  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3203  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3204  }
3205  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3206  s->mv_dir = MV_DIR_BACKWARD;
3207  s->mv_type = MV_TYPE_16X16;
3208  s->mb_intra= 0;
3209  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3210  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3211  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3212  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3213  }
3214  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3216  s->mv_type = MV_TYPE_16X16;
3217  s->mb_intra= 0;
3218  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3219  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3220  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3221  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3222  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3223  &dmin, &next_block, 0, 0);
3224  }
3225  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3226  s->mv_dir = MV_DIR_FORWARD;
3227  s->mv_type = MV_TYPE_FIELD;
3228  s->mb_intra= 0;
3229  for(i=0; i<2; i++){
3230  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3231  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3232  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3233  }
3234  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3235  &dmin, &next_block, 0, 0);
3236  }
3237  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3238  s->mv_dir = MV_DIR_BACKWARD;
3239  s->mv_type = MV_TYPE_FIELD;
3240  s->mb_intra= 0;
3241  for(i=0; i<2; i++){
3242  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3243  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3244  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3245  }
3246  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3247  &dmin, &next_block, 0, 0);
3248  }
3249  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3251  s->mv_type = MV_TYPE_FIELD;
3252  s->mb_intra= 0;
3253  for(dir=0; dir<2; dir++){
3254  for(i=0; i<2; i++){
3255  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3256  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3257  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3258  }
3259  }
3260  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3261  &dmin, &next_block, 0, 0);
3262  }
3263  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3264  s->mv_dir = 0;
3265  s->mv_type = MV_TYPE_16X16;
3266  s->mb_intra= 1;
3267  s->mv[0][0][0] = 0;
3268  s->mv[0][0][1] = 0;
3269  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3270  &dmin, &next_block, 0, 0);
3271  if(s->h263_pred || s->h263_aic){
3272  if(best_s.mb_intra)
3273  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3274  else
3275  ff_clean_intra_table_entries(s); //old mode?
3276  }
3277  }
3278 
3279  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3280  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3281  const int last_qp= backup_s.qscale;
3282  int qpi, qp, dc[6];
3283  int16_t ac[6][16];
3284  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3285  static const int dquant_tab[4]={-1,1,-2,2};
3286  int storecoefs = s->mb_intra && s->dc_val[0];
3287 
3288  av_assert2(backup_s.dquant == 0);
3289 
3290  //FIXME intra
3291  s->mv_dir= best_s.mv_dir;
3292  s->mv_type = MV_TYPE_16X16;
3293  s->mb_intra= best_s.mb_intra;
3294  s->mv[0][0][0] = best_s.mv[0][0][0];
3295  s->mv[0][0][1] = best_s.mv[0][0][1];
3296  s->mv[1][0][0] = best_s.mv[1][0][0];
3297  s->mv[1][0][1] = best_s.mv[1][0][1];
3298 
3299  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3300  for(; qpi<4; qpi++){
3301  int dquant= dquant_tab[qpi];
3302  qp= last_qp + dquant;
3303  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3304  continue;
3305  backup_s.dquant= dquant;
3306  if(storecoefs){
3307  for(i=0; i<6; i++){
3308  dc[i]= s->dc_val[0][ s->block_index[i] ];
3309  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3310  }
3311  }
3312 
3313  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3314  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3315  if(best_s.qscale != qp){
3316  if(storecoefs){
3317  for(i=0; i<6; i++){
3318  s->dc_val[0][ s->block_index[i] ]= dc[i];
3319  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3320  }
3321  }
3322  }
3323  }
3324  }
3325  }
3326  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3327  int mx= s->b_direct_mv_table[xy][0];
3328  int my= s->b_direct_mv_table[xy][1];
3329 
3330  backup_s.dquant = 0;
3332  s->mb_intra= 0;
3333  ff_mpeg4_set_direct_mv(s, mx, my);
3334  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3335  &dmin, &next_block, mx, my);
3336  }
3337  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3338  backup_s.dquant = 0;
3340  s->mb_intra= 0;
3341  ff_mpeg4_set_direct_mv(s, 0, 0);
3342  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3343  &dmin, &next_block, 0, 0);
3344  }
3345  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3346  int coded=0;
3347  for(i=0; i<6; i++)
3348  coded |= s->block_last_index[i];
3349  if(coded){
3350  int mx,my;
3351  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3352  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3353  mx=my=0; //FIXME find the one we actually used
3354  ff_mpeg4_set_direct_mv(s, mx, my);
3355  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3356  mx= s->mv[1][0][0];
3357  my= s->mv[1][0][1];
3358  }else{
3359  mx= s->mv[0][0][0];
3360  my= s->mv[0][0][1];
3361  }
3362 
3363  s->mv_dir= best_s.mv_dir;
3364  s->mv_type = best_s.mv_type;
3365  s->mb_intra= 0;
3366 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3367  s->mv[0][0][1] = best_s.mv[0][0][1];
3368  s->mv[1][0][0] = best_s.mv[1][0][0];
3369  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3370  backup_s.dquant= 0;
3371  s->skipdct=1;
3372  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3373  &dmin, &next_block, mx, my);
3374  s->skipdct=0;
3375  }
3376  }
3377 
3378  s->current_picture.qscale_table[xy] = best_s.qscale;
3379 
3380  copy_context_after_encode(s, &best_s, -1);
3381 
3382  pb_bits_count= put_bits_count(&s->pb);
3383  flush_put_bits(&s->pb);
3384  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3385  s->pb= backup_s.pb;
3386 
3387  if(s->data_partitioning){
3388  pb2_bits_count= put_bits_count(&s->pb2);
3389  flush_put_bits(&s->pb2);
3390  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3391  s->pb2= backup_s.pb2;
3392 
3393  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3394  flush_put_bits(&s->tex_pb);
3395  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3396  s->tex_pb= backup_s.tex_pb;
3397  }
3398  s->last_bits= put_bits_count(&s->pb);
3399 
3400  if (CONFIG_H263_ENCODER &&
3403 
3404  if(next_block==0){ //FIXME 16 vs linesize16
3405  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3406  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3407  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3408  }
3409 
3411  ff_mpv_decode_mb(s, s->block);
3412  } else {
3413  int motion_x = 0, motion_y = 0;
3415  // only one MB-Type possible
3416 
3417  switch(mb_type){
3419  s->mv_dir = 0;
3420  s->mb_intra= 1;
3421  motion_x= s->mv[0][0][0] = 0;
3422  motion_y= s->mv[0][0][1] = 0;
3423  break;
3425  s->mv_dir = MV_DIR_FORWARD;
3426  s->mb_intra= 0;
3427  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3428  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3429  break;
3431  s->mv_dir = MV_DIR_FORWARD;
3432  s->mv_type = MV_TYPE_FIELD;
3433  s->mb_intra= 0;
3434  for(i=0; i<2; i++){
3435  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3436  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3437  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3438  }
3439  break;
3441  s->mv_dir = MV_DIR_FORWARD;
3442  s->mv_type = MV_TYPE_8X8;
3443  s->mb_intra= 0;
3444  for(i=0; i<4; i++){
3445  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3446  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3447  }
3448  break;
3450  if (CONFIG_MPEG4_ENCODER) {
3452  s->mb_intra= 0;
3453  motion_x=s->b_direct_mv_table[xy][0];
3454  motion_y=s->b_direct_mv_table[xy][1];
3455  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3456  }
3457  break;
3459  if (CONFIG_MPEG4_ENCODER) {
3461  s->mb_intra= 0;
3462  ff_mpeg4_set_direct_mv(s, 0, 0);
3463  }
3464  break;
3467  s->mb_intra= 0;
3468  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3469  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3470  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3471  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3472  break;
3474  s->mv_dir = MV_DIR_BACKWARD;
3475  s->mb_intra= 0;
3476  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3477  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3478  break;
3480  s->mv_dir = MV_DIR_FORWARD;
3481  s->mb_intra= 0;
3482  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3483  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3484  break;
3486  s->mv_dir = MV_DIR_FORWARD;
3487  s->mv_type = MV_TYPE_FIELD;
3488  s->mb_intra= 0;
3489  for(i=0; i<2; i++){
3490  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3491  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3492  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3493  }
3494  break;
3496  s->mv_dir = MV_DIR_BACKWARD;
3497  s->mv_type = MV_TYPE_FIELD;
3498  s->mb_intra= 0;
3499  for(i=0; i<2; i++){
3500  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3501  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3502  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3503  }
3504  break;
3507  s->mv_type = MV_TYPE_FIELD;
3508  s->mb_intra= 0;
3509  for(dir=0; dir<2; dir++){
3510  for(i=0; i<2; i++){
3511  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3512  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3513  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3514  }
3515  }
3516  break;
3517  default:
3518  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3519  }
3520 
3521  encode_mb(s, motion_x, motion_y);
3522 
3523  // RAL: Update last macroblock type
3524  s->last_mv_dir = s->mv_dir;
3525 
3526  if (CONFIG_H263_ENCODER &&
3529 
3530  ff_mpv_decode_mb(s, s->block);
3531  }
3532 
3533  /* clean the MV table in IPS frames for direct mode in B-frames */
3534  if(s->mb_intra /* && I,P,S_TYPE */){
3535  s->p_mv_table[xy][0]=0;
3536  s->p_mv_table[xy][1]=0;
3537  }
3538 
3539  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3540  int w= 16;
3541  int h= 16;
3542 
3543  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3544  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3545 
3547  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3548  s->dest[0], w, h, s->linesize);
3550  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3551  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3553  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3554  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3555  }
3556  if(s->loop_filter){
3557  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3559  }
3560  ff_dlog(s->avctx, "MB %d %d bits\n",
3561  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3562  }
3563  }
3564 
3565  //not beautiful here but we must write it before flushing so it has to be here
3568 
3569  write_slice_end(s);
3570 
3571 #if FF_API_RTP_CALLBACK
3573  /* Send the last GOB if RTP */
3574  if (s->avctx->rtp_callback) {
3575  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3576  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3577  /* Call the RTP callback to send the last GOB */
3578  emms_c();
3579  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3580  }
3582 #endif
3583 
3584  return 0;
3585 }
3586 
3587 #define MERGE(field) dst->field += src->field; src->field=0
3589  MERGE(me.scene_change_score);
3590  MERGE(me.mc_mb_var_sum_temp);
3591  MERGE(me.mb_var_sum_temp);
3592 }
3593 
3595  int i;
3596 
3597  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3598  MERGE(dct_count[1]);
3599  MERGE(mv_bits);
3600  MERGE(i_tex_bits);
3601  MERGE(p_tex_bits);
3602  MERGE(i_count);
3603  MERGE(f_count);
3604  MERGE(b_count);
3605  MERGE(skip_count);
3606  MERGE(misc_bits);
3607  MERGE(er.error_count);
3612 
3613  if (dst->noise_reduction){
3614  for(i=0; i<64; i++){
3615  MERGE(dct_error_sum[0][i]);
3616  MERGE(dct_error_sum[1][i]);
3617  }
3618  }
3619 
3620  assert(put_bits_count(&src->pb) % 8 ==0);
3621  assert(put_bits_count(&dst->pb) % 8 ==0);
3622  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3623  flush_put_bits(&dst->pb);
3624 }
3625 
3626 static int estimate_qp(MpegEncContext *s, int dry_run){
3627  if (s->next_lambda){
3630  if(!dry_run) s->next_lambda= 0;
3631  } else if (!s->fixed_qscale) {
3634  if (s->current_picture.f->quality < 0)
3635  return -1;
3636  }
3637 
3638  if(s->adaptive_quant){
3639  switch(s->codec_id){
3640  case AV_CODEC_ID_MPEG4:
3641  if (CONFIG_MPEG4_ENCODER)
3643  break;
3644  case AV_CODEC_ID_H263:
3645  case AV_CODEC_ID_H263P:
3646  case AV_CODEC_ID_FLV1:
3647  if (CONFIG_H263_ENCODER)
3649  break;
3650  default:
3651  ff_init_qscale_tab(s);
3652  }
3653 
3654  s->lambda= s->lambda_table[0];
3655  //FIXME broken
3656  }else
3657  s->lambda = s->current_picture.f->quality;
3658  update_qscale(s);
3659  return 0;
3660 }
3661 
3662 /* must be called before writing the header */
3665  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3666 
3667  if(s->pict_type==AV_PICTURE_TYPE_B){
3668  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3669  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3670  }else{
3671  s->pp_time= s->time - s->last_non_b_time;
3672  s->last_non_b_time= s->time;
3673  assert(s->picture_number==0 || s->pp_time > 0);
3674  }
3675 }
3676 
3678 {
3679  int i, ret;
3680  int bits;
3681  int context_count = s->slice_context_count;
3682 
3684 
3685  /* Reset the average MB variance */
3686  s->me.mb_var_sum_temp =
3687  s->me.mc_mb_var_sum_temp = 0;
3688 
3689  /* we need to initialize some time vars before we can encode B-frames */
3690  // RAL: Condition added for MPEG1VIDEO
3693  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3694  ff_set_mpeg4_time(s);
3695 
3696  s->me.scene_change_score=0;
3697 
3698 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3699 
3700  if(s->pict_type==AV_PICTURE_TYPE_I){
3701  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3702  else s->no_rounding=0;
3703  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3705  s->no_rounding ^= 1;
3706  }
3707 
3708  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3709  if (estimate_qp(s,1) < 0)
3710  return -1;
3711  ff_get_2pass_fcode(s);
3712  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3714  s->lambda= s->last_lambda_for[s->pict_type];
3715  else
3717  update_qscale(s);
3718  }
3719 
3725  }
3726 
3727  s->mb_intra=0; //for the rate distortion & bit compare functions
3728  for(i=1; i<context_count; i++){
3730  if (ret < 0)
3731  return ret;
3732  }
3733 
3734  if(ff_init_me(s)<0)
3735  return -1;
3736 
3737  /* Estimate motion for every MB */
3738  if(s->pict_type != AV_PICTURE_TYPE_I){
3739  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3740  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3741  if (s->pict_type != AV_PICTURE_TYPE_B) {
3742  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3743  s->me_pre == 2) {
3744  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3745  }
3746  }
3747 
3748  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3749  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3750  /* I-Frame */
3751  for(i=0; i<s->mb_stride*s->mb_height; i++)
3753 
3754  if(!s->fixed_qscale){
3755  /* finding spatial complexity for I-frame rate control */
3756  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3757  }
3758  }
3759  for(i=1; i<context_count; i++){
3761  }
3763  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3764  emms_c();
3765 
3767  s->pict_type == AV_PICTURE_TYPE_P) {
3769  for(i=0; i<s->mb_stride*s->mb_height; i++)
3771  if(s->msmpeg4_version >= 3)
3772  s->no_rounding=1;
3773  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3775  }
3776 
3777  if(!s->umvplus){
3780 
3782  int a,b;
3783  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3785  s->f_code= FFMAX3(s->f_code, a, b);
3786  }
3787 
3788  ff_fix_long_p_mvs(s);
3791  int j;
3792  for(i=0; i<2; i++){
3793  for(j=0; j<2; j++)
3796  }
3797  }
3798  }
3799 
3800  if(s->pict_type==AV_PICTURE_TYPE_B){
3801  int a, b;
3802 
3805  s->f_code = FFMAX(a, b);
3806 
3809  s->b_code = FFMAX(a, b);
3810 
3816  int dir, j;
3817  for(dir=0; dir<2; dir++){
3818  for(i=0; i<2; i++){
3819  for(j=0; j<2; j++){
3822  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3823  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3824  }
3825  }
3826  }
3827  }
3828  }
3829  }
3830 
3831  if (estimate_qp(s, 0) < 0)
3832  return -1;
3833 
3834  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3835  s->pict_type == AV_PICTURE_TYPE_I &&
3836  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3837  s->qscale= 3; //reduce clipping problems
3838 
3839  if (s->out_format == FMT_MJPEG) {
3840  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3841  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3842 
3843  if (s->avctx->intra_matrix) {
3844  chroma_matrix =
3845  luma_matrix = s->avctx->intra_matrix;
3846  }
3847  if (s->avctx->chroma_intra_matrix)
3848  chroma_matrix = s->avctx->chroma_intra_matrix;
3849 
3850  /* for mjpeg, we do include qscale in the matrix */
3851  for(i=1;i<64;i++){
3852  int j = s->idsp.idct_permutation[i];
3853 
3854  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3855  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3856  }
3857  s->y_dc_scale_table=
3859  s->chroma_intra_matrix[0] =
3862  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3864  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3865  s->qscale= 8;
3866  }
3867  if(s->codec_id == AV_CODEC_ID_AMV){
3868  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3869  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3870  for(i=1;i<64;i++){
3871  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3872 
3873  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3874  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3875  }
3876  s->y_dc_scale_table= y;
3877  s->c_dc_scale_table= c;
3878  s->intra_matrix[0] = 13;
3879  s->chroma_intra_matrix[0] = 14;
3881  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3883  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3884  s->qscale= 8;
3885  }
3886 
3887  //FIXME var duplication
3889  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3892 
3893  if (s->current_picture.f->key_frame)
3894  s->picture_in_gop_number=0;
3895 
3896  s->mb_x = s->mb_y = 0;
3897  s->last_bits= put_bits_count(&s->pb);
3898  switch(s->out_format) {
3899  case FMT_MJPEG:
3900  if (CONFIG_MJPEG_ENCODER)
3903  break;
3904  case FMT_H261:
3905  if (CONFIG_H261_ENCODER)
3906  ff_h261_encode_picture_header(s, picture_number);
3907  break;
3908  case FMT_H263:
3909  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3910  ff_wmv2_encode_picture_header(s, picture_number);
3911  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3912  ff_msmpeg4_encode_picture_header(s, picture_number);
3913  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3914  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3915  if (ret < 0)
3916  return ret;
3917  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3918  ret = ff_rv10_encode_picture_header(s, picture_number);
3919  if (ret < 0)
3920  return ret;
3921  }
3922  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3923  ff_rv20_encode_picture_header(s, picture_number);
3924  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3925  ff_flv_encode_picture_header(s, picture_number);
3926  else if (CONFIG_H263_ENCODER)
3927  ff_h263_encode_picture_header(s, picture_number);
3928  break;
3929  case FMT_MPEG1:
3930  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3931  ff_mpeg1_encode_picture_header(s, picture_number);
3932  break;
3933  default:
3934  av_assert0(0);
3935  }
3936  bits= put_bits_count(&s->pb);
3937  s->header_bits= bits - s->last_bits;
3938 
3939  for(i=1; i<context_count; i++){
3941  }
3942  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3943  for(i=1; i<context_count; i++){
3944  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3945  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3947  }
3948  emms_c();
3949  return 0;
3950 }
3951 
3952 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3953  const int intra= s->mb_intra;
3954  int i;
3955 
3956  s->dct_count[intra]++;
3957 
3958  for(i=0; i<64; i++){
3959  int level= block[i];
3960 
3961  if(level){
3962  if(level>0){
3963  s->dct_error_sum[intra][i] += level;
3964  level -= s->dct_offset[intra][i];
3965  if(level<0) level=0;
3966  }else{
3967  s->dct_error_sum[intra][i] -= level;
3968  level += s->dct_offset[intra][i];
3969  if(level>0) level=0;
3970  }
3971  block[i]= level;
3972  }
3973  }
3974 }
3975 
3977  int16_t *block, int n,
3978  int qscale, int *overflow){
3979  const int *qmat;
3980  const uint16_t *matrix;
3981  const uint8_t *scantable= s->intra_scantable.scantable;
3982  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3983  int max=0;
3984  unsigned int threshold1, threshold2;
3985  int bias=0;
3986  int run_tab[65];
3987  int level_tab[65];
3988  int score_tab[65];
3989  int survivor[65];
3990  int survivor_count;
3991  int last_run=0;
3992  int last_level=0;
3993  int last_score= 0;
3994  int last_i;
3995  int coeff[2][64];
3996  int coeff_count[64];
3997  int qmul, qadd, start_i, last_non_zero, i, dc;
3998  const int esc_length= s->ac_esc_length;
3999  uint8_t * length;
4000  uint8_t * last_length;
4001  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4002  int mpeg2_qscale;
4003 
4004  s->fdsp.fdct(block);
4005 
4006  if(s->dct_error_sum)
4007  s->denoise_dct(s, block);
4008  qmul= qscale*16;
4009  qadd= ((qscale-1)|1)*8;
4010 
4011  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4012  else mpeg2_qscale = qscale << 1;
4013 
4014  if (s->mb_intra) {
4015  int q;
4016  if (!s->h263_aic) {
4017  if (n < 4)
4018  q = s->y_dc_scale;
4019  else
4020  q = s->c_dc_scale;
4021  q = q << 3;
4022  } else{
4023  /* For AIC we skip quant/dequant of INTRADC */
4024  q = 1 << 3;
4025  qadd=0;
4026  }
4027 
4028  /* note: block[0] is assumed to be positive */
4029  block[0] = (block[0] + (q >> 1)) / q;
4030  start_i = 1;
4031  last_non_zero = 0;
4032  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4033  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4034  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4035  bias= 1<<(QMAT_SHIFT-1);
4036 
4037  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4038  length = s->intra_chroma_ac_vlc_length;
4039  last_length= s->intra_chroma_ac_vlc_last_length;
4040  } else {
4041  length = s->intra_ac_vlc_length;
4042  last_length= s->intra_ac_vlc_last_length;
4043  }
4044  } else {
4045  start_i = 0;
4046  last_non_zero = -1;
4047  qmat = s->q_inter_matrix[qscale];
4048  matrix = s->inter_matrix;
4049  length = s->inter_ac_vlc_length;
4050  last_length= s->inter_ac_vlc_last_length;
4051  }
4052  last_i= start_i;
4053 
4054  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4055  threshold2= (threshold1<<1);
4056 
4057  for(i=63; i>=start_i; i--) {
4058  const int j = scantable[i];
4059  int level = block[j] * qmat[j];
4060 
4061  if(((unsigned)(level+threshold1))>threshold2){
4062  last_non_zero = i;
4063  break;
4064  }
4065  }
4066 
4067  for(i=start_i; i<=last_non_zero; i++) {
4068  const int j = scantable[i];
4069  int level = block[j] * qmat[j];
4070 
4071 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4072 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4073  if(((unsigned)(level+threshold1))>threshold2){
4074  if(level>0){
4075  level= (bias + level)>>QMAT_SHIFT;
4076  coeff[0][i]= level;
4077  coeff[1][i]= level-1;
4078 // coeff[2][k]= level-2;
4079  }else{
4080  level= (bias - level)>>QMAT_SHIFT;
4081  coeff[0][i]= -level;
4082  coeff[1][i]= -level+1;
4083 // coeff[2][k]= -level+2;
4084  }
4085  coeff_count[i]= FFMIN(level, 2);
4086  av_assert2(coeff_count[i]);
4087  max |=level;
4088  }else{
4089  coeff[0][i]= (level>>31)|1;
4090  coeff_count[i]= 1;
4091  }
4092  }
4093 
4094  *overflow= s->max_qcoeff < max; //overflow might have happened
4095 
4096  if(last_non_zero < start_i){
4097  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4098  return last_non_zero;
4099  }
4100 
4101  score_tab[start_i]= 0;
4102  survivor[0]= start_i;
4103  survivor_count= 1;
4104 
4105  for(i=start_i; i<=last_non_zero; i++){
4106  int level_index, j, zero_distortion;
4107  int dct_coeff= FFABS(block[ scantable[i] ]);
4108  int best_score=256*256*256*120;
4109 
4110  if (s->fdsp.fdct == ff_fdct_ifast)
4111  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4112  zero_distortion= dct_coeff*dct_coeff;
4113 
4114  for(level_index=0; level_index < coeff_count[i]; level_index++){
4115  int distortion;
4116  int level= coeff[level_index][i];
4117  const int alevel= FFABS(level);
4118  int unquant_coeff;
4119 
4120  av_assert2(level);
4121 
4122  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4123  unquant_coeff= alevel*qmul + qadd;
4124  } else if(s->out_format == FMT_MJPEG) {
4125  j = s->idsp.idct_permutation[scantable[i]];
4126  unquant_coeff = alevel * matrix[j] * 8;
4127  }else{ // MPEG-1
4128  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4129  if(s->mb_intra){
4130  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4131  unquant_coeff = (unquant_coeff - 1) | 1;
4132  }else{
4133  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4134  unquant_coeff = (unquant_coeff - 1) | 1;
4135  }
4136  unquant_coeff<<= 3;
4137  }
4138 
4139  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4140  level+=64;
4141  if((level&(~127)) == 0){
4142  for(j=survivor_count-1; j>=0; j--){
4143  int run= i - survivor[j];
4144  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4145  score += score_tab[i-run];
4146 
4147  if(score < best_score){
4148  best_score= score;
4149  run_tab[i+1]= run;
4150  level_tab[i+1]= level-64;
4151  }
4152  }
4153 
4154  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4155  for(j=survivor_count-1; j>=0; j--){
4156  int run= i - survivor[j];
4157  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4158  score += score_tab[i-run];
4159  if(score < last_score){
4160  last_score= score;
4161  last_run= run;
4162  last_level= level-64;
4163  last_i= i+1;
4164  }
4165  }
4166  }
4167  }else{
4168  distortion += esc_length*lambda;
4169  for(j=survivor_count-1; j>=0; j--){
4170  int run= i - survivor[j];
4171  int score= distortion + score_tab[i-run];
4172 
4173  if(score < best_score){
4174  best_score= score;
4175  run_tab[i+1]= run;
4176  level_tab[i+1]= level-64;
4177  }
4178  }
4179 
4180  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4181  for(j=survivor_count-1; j>=0; j--){
4182  int run= i - survivor[j];
4183  int score= distortion + score_tab[i-run];
4184  if(score < last_score){
4185  last_score= score;
4186  last_run= run;
4187  last_level= level-64;
4188  last_i= i+1;
4189  }
4190  }
4191  }
4192  }
4193  }
4194 
4195  score_tab[i+1]= best_score;
4196 
4197  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4198  if(last_non_zero <= 27){
4199  for(; survivor_count; survivor_count--){
4200  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4201  break;
4202  }
4203  }else{
4204  for(; survivor_count; survivor_count--){
4205  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4206  break;
4207  }
4208  }
4209 
4210  survivor[ survivor_count++ ]= i+1;
4211  }
4212 
4213  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4214  last_score= 256*256*256*120;
4215  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4216  int score= score_tab[i];
4217  if (i)
4218  score += lambda * 2; // FIXME more exact?
4219 
4220  if(score < last_score){
4221  last_score= score;
4222  last_i= i;
4223  last_level= level_tab[i];
4224  last_run= run_tab[i];
4225  }
4226  }
4227  }
4228 
4229  s->coded_score[n] = last_score;
4230 
4231  dc= FFABS(block[0]);
4232  last_non_zero= last_i - 1;
4233  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4234 
4235  if(last_non_zero < start_i)
4236  return last_non_zero;
4237 
4238  if(last_non_zero == 0 && start_i == 0){
4239  int best_level= 0;
4240  int best_score= dc * dc;
4241 
4242  for(i=0; i<coeff_count[0]; i++){
4243  int level= coeff[i][0];
4244  int alevel= FFABS(level);
4245  int unquant_coeff, score, distortion;
4246 
4247  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4248  unquant_coeff= (alevel*qmul + qadd)>>3;
4249  } else{ // MPEG-1
4250  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4251  unquant_coeff = (unquant_coeff - 1) | 1;
4252  }
4253  unquant_coeff = (unquant_coeff + 4) >> 3;
4254  unquant_coeff<<= 3 + 3;
4255 
4256  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4257  level+=64;
4258  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4259  else score= distortion + esc_length*lambda;
4260 
4261  if(score < best_score){
4262  best_score= score;
4263  best_level= level - 64;
4264  }
4265  }
4266  block[0]= best_level;
4267  s->coded_score[n] = best_score - dc*dc;
4268  if(best_level == 0) return -1;
4269  else return last_non_zero;
4270  }
4271 
4272  i= last_i;
4273  av_assert2(last_level);
4274 
4275  block[ perm_scantable[last_non_zero] ]= last_level;
4276  i -= last_run + 1;
4277 
4278  for(; i>start_i; i -= run_tab[i] + 1){
4279  block[ perm_scantable[i-1] ]= level_tab[i];
4280  }
4281 
4282  return last_non_zero;
4283 }
4284 
4285 //#define REFINE_STATS 1
4286 static int16_t basis[64][64];
4287 
4288 static void build_basis(uint8_t *perm){
4289  int i, j, x, y;
4290  emms_c();
4291  for(i=0; i<8; i++){
4292  for(j=0; j<8; j++){
4293  for(y=0; y<8; y++){
4294  for(x=0; x<8; x++){
4295  double s= 0.25*(1<<BASIS_SHIFT);
4296  int index= 8*i + j;
4297  int perm_index= perm[index];
4298  if(i==0) s*= sqrt(0.5);
4299  if(j==0) s*= sqrt(0.5);
4300  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4301  }
4302  }
4303  }
4304  }
4305 }
4306 
4307 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4308  int16_t *block, int16_t *weight, int16_t *orig,
4309  int n, int qscale){
4310  int16_t rem[64];
4311  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4312  const uint8_t *scantable= s->intra_scantable.scantable;
4313  const uint8_t *perm_scantable= s->intra_scantable.permutated;
4314 // unsigned int threshold1, threshold2;
4315 // int bias=0;
4316  int run_tab[65];
4317  int prev_run=0;
4318  int prev_level=0;
4319  int qmul, qadd, start_i, last_non_zero, i, dc;
4320  uint8_t * length;
4321  uint8_t * last_length;
4322  int lambda;
4323  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4324 #ifdef REFINE_STATS
4325 static int count=0;
4326 static int after_last=0;
4327 static int to_zero=0;
4328 static int from_zero=0;
4329 static int raise=0;
4330 static int lower=0;
4331 static int messed_sign=0;
4332 #endif
4333 
4334  if(basis[0][0] == 0)
4336 
4337  qmul= qscale*2;
4338  qadd= (qscale-1)|1;
4339  if (s->mb_intra) {
4340  if (!s->h263_aic) {
4341  if (n < 4)
4342  q = s->y_dc_scale;
4343  else
4344  q = s->c_dc_scale;
4345  } else{
4346  /* For AIC we skip quant/dequant of INTRADC */
4347  q = 1;
4348  qadd=0;
4349  }
4350  q <<= RECON_SHIFT-3;
4351  /* note: block[0] is assumed to be positive */
4352  dc= block[0]*q;
4353 // block[0] = (block[0] + (q >> 1)) / q;
4354  start_i = 1;
4355 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4356 // bias= 1<<(QMAT_SHIFT-1);
4357  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4358  length = s->intra_chroma_ac_vlc_length;
4359  last_length= s->intra_chroma_ac_vlc_last_length;
4360  } else {
4361  length = s->intra_ac_vlc_length;
4362  last_length= s->intra_ac_vlc_last_length;
4363  }
4364  } else {
4365  dc= 0;
4366  start_i = 0;
4367  length = s->inter_ac_vlc_length;
4368  last_length= s->inter_ac_vlc_last_length;
4369  }
4370  last_non_zero = s->block_last_index[n];
4371 
4372 #ifdef REFINE_STATS
4373 {START_TIMER
4374 #endif
4375  dc += (1<<(RECON_SHIFT-1));
4376  for(i=0; i<64; i++){
4377  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4378  }
4379 #ifdef REFINE_STATS
4380 STOP_TIMER("memset rem[]")}
4381 #endif
4382  sum=0;
4383  for(i=0; i<64; i++){
4384  int one= 36;
4385  int qns=4;
4386  int w;
4387 
4388  w= FFABS(weight[i]) + qns*one;
4389  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4390 
4391  weight[i] = w;
4392 // w=weight[i] = (63*qns + (w/2)) / w;
4393 
4394  av_assert2(w>0);
4395  av_assert2(w<(1<<6));
4396  sum += w*w;
4397  }
4398  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4399 #ifdef REFINE_STATS
4400 {START_TIMER
4401 #endif
4402  run=0;
4403  rle_index=0;
4404  for(i=start_i; i<=last_non_zero; i++){
4405  int j= perm_scantable[i];
4406  const int level= block[j];
4407  int coeff;
4408 
4409  if(level){
4410  if(level<0) coeff= qmul*level - qadd;
4411  else coeff= qmul*level + qadd;
4412  run_tab[rle_index++]=run;
4413  run=0;
4414 
4415  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4416  }else{
4417  run++;
4418  }
4419  }
4420 #ifdef REFINE_STATS
4421 if(last_non_zero>0){
4422 STOP_TIMER("init rem[]")
4423 }
4424 }
4425 
4426 {START_TIMER
4427 #endif
4428  for(;;){
4429  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4430  int best_coeff=0;
4431  int best_change=0;
4432  int run2, best_unquant_change=0, analyze_gradient;
4433 #ifdef REFINE_STATS
4434 {START_TIMER
4435 #endif
4436  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4437 
4438  if(analyze_gradient){
4439 #ifdef REFINE_STATS
4440 {START_TIMER
4441 #endif
4442  for(i=0; i<64; i++){
4443  int w= weight[i];
4444 
4445  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4446  }
4447 #ifdef REFINE_STATS
4448 STOP_TIMER("rem*w*w")}
4449 {START_TIMER
4450 #endif
4451  s->fdsp.fdct(d1);
4452 #ifdef REFINE_STATS
4453 STOP_TIMER("dct")}
4454 #endif
4455  }
4456 
4457  if(start_i){
4458  const int level= block[0];
4459  int change, old_coeff;
4460 
4461  av_assert2(s->mb_intra);
4462 
4463  old_coeff= q*level;
4464 
4465  for(change=-1; change<=1; change+=2){
4466  int new_level= level + change;
4467  int score, new_coeff;
4468 
4469  new_coeff= q*new_level;
4470  if(new_coeff >= 2048 || new_coeff < 0)
4471  continue;
4472 
4473  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4474  new_coeff - old_coeff);
4475  if(score<best_score){
4476  best_score= score;
4477  best_coeff= 0;
4478  best_change= change;
4479  best_unquant_change= new_coeff - old_coeff;
4480  }
4481  }
4482  }
4483 
4484  run=0;
4485  rle_index=0;
4486  run2= run_tab[rle_index++];
4487  prev_level=0;
4488  prev_run=0;
4489 
4490  for(i=start_i; i<64; i++){
4491  int j= perm_scantable[i];
4492  const int level= block[j];
4493  int change, old_coeff;
4494 
4495  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4496  break;
4497 
4498  if(level){
4499  if(level<0) old_coeff= qmul*level - qadd;
4500  else old_coeff= qmul*level + qadd;
4501  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4502  }else{
4503  old_coeff=0;
4504  run2--;
4505  av_assert2(run2>=0 || i >= last_non_zero );
4506  }
4507 
4508  for(change=-1; change<=1; change+=2){
4509  int new_level= level + change;
4510  int score, new_coeff, unquant_change;
4511 
4512  score=0;
4513  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4514  continue;
4515 
4516  if(new_level){
4517  if(new_level<0) new_coeff= qmul*new_level - qadd;
4518  else new_coeff= qmul*new_level + qadd;
4519  if(new_coeff >= 2048 || new_coeff <= -2048)
4520  continue;
4521  //FIXME check for overflow
4522 
4523  if(level){
4524  if(level < 63 && level > -63){
4525  if(i < last_non_zero)
4526  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4527  - length[UNI_AC_ENC_INDEX(run, level+64)];
4528  else
4529  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4530  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4531  }
4532  }else{
4533  av_assert2(FFABS(new_level)==1);
4534 
4535  if(analyze_gradient){
4536  int g= d1[ scantable[i] ];
4537  if(g && (g^new_level) >= 0)
4538  continue;
4539  }
4540 
4541  if(i < last_non_zero){
4542  int next_i= i + run2 + 1;
4543  int next_level= block[ perm_scantable[next_i] ] + 64;
4544 
4545  if(next_level&(~127))
4546  next_level= 0;
4547 
4548  if(next_i < last_non_zero)
4549  score += length[UNI_AC_ENC_INDEX(run, 65)]
4550  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4551  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4552  else
4553  score += length[UNI_AC_ENC_INDEX(run, 65)]
4554  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4555  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4556  }else{
4557  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4558  if(prev_level){
4559  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4560  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4561  }
4562  }
4563  }
4564  }else{
4565  new_coeff=0;
4566  av_assert2(FFABS(level)==1);
4567 
4568  if(i < last_non_zero){
4569  int next_i= i + run2 + 1;
4570  int next_level= block[ perm_scantable[next_i] ] + 64;
4571 
4572  if(next_level&(~127))
4573  next_level= 0;
4574 
4575  if(next_i < last_non_zero)
4576  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4577  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4578  - length[UNI_AC_ENC_INDEX(run, 65)];
4579  else
4580  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4581  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4582  - length[UNI_AC_ENC_INDEX(run, 65)];
4583  }else{
4584  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4585  if(prev_level){
4586  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4587  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4588  }
4589  }
4590  }
4591 
4592  score *= lambda;
4593 
4594  unquant_change= new_coeff - old_coeff;
4595  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4596 
4597  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4598  unquant_change);
4599  if(score<best_score){
4600  best_score= score;
4601  best_coeff= i;
4602  best_change= change;
4603  best_unquant_change= unquant_change;
4604  }
4605  }
4606  if(level){
4607  prev_level= level + 64;
4608  if(prev_level&(~127))
4609  prev_level= 0;
4610  prev_run= run;
4611  run=0;
4612  }else{
4613  run++;
4614  }
4615  }
4616 #ifdef REFINE_STATS
4617 STOP_TIMER("iterative step")}
4618 #endif
4619 
4620  if(best_change){
4621  int j= perm_scantable[ best_coeff ];
4622 
4623  block[j] += best_change;
4624 
4625  if(best_coeff > last_non_zero){
4626  last_non_zero= best_coeff;
4627  av_assert2(block[j]);
4628 #ifdef REFINE_STATS
4629 after_last++;
4630 #endif
4631  }else{
4632 #ifdef REFINE_STATS
4633 if(block[j]){
4634  if(block[j] - best_change){
4635  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4636  raise++;
4637  }else{
4638  lower++;
4639  }
4640  }else{
4641  from_zero++;
4642  }
4643 }else{
4644  to_zero++;
4645 }
4646 #endif
4647  for(; last_non_zero>=start_i; last_non_zero--){
4648  if(block[perm_scantable[last_non_zero]])
4649  break;
4650  }
4651  }
4652 #ifdef REFINE_STATS
4653 count++;
4654 if(256*256*256*64 % count == 0){
4655  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4656 }
4657 #endif
4658  run=0;
4659  rle_index=0;
4660  for(i=start_i; i<=last_non_zero; i++){
4661  int j= perm_scantable[i];
4662  const int level= block[j];
4663 
4664  if(level){
4665  run_tab[rle_index++]=run;
4666  run=0;
4667  }else{
4668  run++;
4669  }
4670  }
4671 
4672  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4673  }else{
4674  break;
4675  }
4676  }
4677 #ifdef REFINE_STATS
4678 if(last_non_zero>0){
4679 STOP_TIMER("iterative search")
4680 }
4681 }
4682 #endif
4683 
4684  return last_non_zero;
4685 }
4686 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    /* Nothing to move when the block has no coefficients past the DC one. */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* Pass 1: lift the covered coefficients out of the block, clearing
     * their source slots so overlapping destinations start from zero. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos]  = block[pos];
        block[pos]    = 0;
    }

    /* Pass 2: drop every saved coefficient into its permuted slot. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
4722 
4724  int16_t *block, int n,
4725  int qscale, int *overflow)
4726 {
4727  int i, j, level, last_non_zero, q, start_i;
4728  const int *qmat;
4729  const uint8_t *scantable= s->intra_scantable.scantable;
4730  int bias;
4731  int max=0;
4732  unsigned int threshold1, threshold2;
4733 
4734  s->fdsp.fdct(block);
4735 
4736  if(s->dct_error_sum)
4737  s->denoise_dct(s, block);
4738 
4739  if (s->mb_intra) {
4740  if (!s->h263_aic) {
4741  if (n < 4)
4742  q = s->y_dc_scale;
4743  else
4744  q = s->c_dc_scale;
4745  q = q << 3;
4746  } else
4747  /* For AIC we skip quant/dequant of INTRADC */
4748  q = 1 << 3;
4749 
4750  /* note: block[0] is assumed to be positive */
4751  block[0] = (block[0] + (q >> 1)) / q;
4752  start_i = 1;
4753  last_non_zero = 0;
4754  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4755  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4756  } else {
4757  start_i = 0;
4758  last_non_zero = -1;
4759  qmat = s->q_inter_matrix[qscale];
4760  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4761  }
4762  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4763  threshold2= (threshold1<<1);
4764  for(i=63;i>=start_i;i--) {
4765  j = scantable[i];
4766  level = block[j] * qmat[j];
4767 
4768  if(((unsigned)(level+threshold1))>threshold2){
4769  last_non_zero = i;
4770  break;
4771  }else{
4772  block[j]=0;
4773  }
4774  }
4775  for(i=start_i; i<=last_non_zero; i++) {
4776  j = scantable[i];
4777  level = block[j] * qmat[j];
4778 
4779 // if( bias+level >= (1<<QMAT_SHIFT)
4780 // || bias-level >= (1<<QMAT_SHIFT)){
4781  if(((unsigned)(level+threshold1))>threshold2){
4782  if(level>0){
4783  level= (bias + level)>>QMAT_SHIFT;
4784  block[j]= level;
4785  }else{
4786  level= (bias - level)>>QMAT_SHIFT;
4787  block[j]= -level;
4788  }
4789  max |=level;
4790  }else{
4791  block[j]=0;
4792  }
4793  }
4794  *overflow= s->max_qcoeff < max; //overflow might have happened
4795 
4796  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4797  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4799  scantable, last_non_zero);
4800 
4801  return last_non_zero;
4802 }
4803 
4804 #define OFFSET(x) offsetof(MpegEncContext, x)
4805 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4806 static const AVOption h263_options[] = {
4807  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4808  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4810  { NULL },
4811 };
4812 
4813 static const AVClass h263_class = {
4814  .class_name = "H.263 encoder",
4815  .item_name = av_default_item_name,
4816  .option = h263_options,
4817  .version = LIBAVUTIL_VERSION_INT,
4818 };
4819 
4821  .name = "h263",
4822  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4823  .type = AVMEDIA_TYPE_VIDEO,
4824  .id = AV_CODEC_ID_H263,
4825  .priv_data_size = sizeof(MpegEncContext),
4827  .encode2 = ff_mpv_encode_picture,
4828  .close = ff_mpv_encode_end,
4830  .priv_class = &h263_class,
4831 };
4832 
4833 static const AVOption h263p_options[] = {
4834  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4835  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4836  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4837  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4839  { NULL },
4840 };
4841 static const AVClass h263p_class = {
4842  .class_name = "H.263p encoder",
4843  .item_name = av_default_item_name,
4844  .option = h263p_options,
4845  .version = LIBAVUTIL_VERSION_INT,
4846 };
4847 
4849  .name = "h263p",
4850  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4851  .type = AVMEDIA_TYPE_VIDEO,
4852  .id = AV_CODEC_ID_H263P,
4853  .priv_data_size = sizeof(MpegEncContext),
4855  .encode2 = ff_mpv_encode_picture,
4856  .close = ff_mpv_encode_end,
4857  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4859  .priv_class = &h263p_class,
4860 };
4861 
4862 static const AVClass msmpeg4v2_class = {
4863  .class_name = "msmpeg4v2 encoder",
4864  .item_name = av_default_item_name,
4865  .option = ff_mpv_generic_options,
4866  .version = LIBAVUTIL_VERSION_INT,
4867 };
4868 
4870  .name = "msmpeg4v2",
4871  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4872  .type = AVMEDIA_TYPE_VIDEO,
4873  .id = AV_CODEC_ID_MSMPEG4V2,
4874  .priv_data_size = sizeof(MpegEncContext),
4876  .encode2 = ff_mpv_encode_picture,
4877  .close = ff_mpv_encode_end,
4879  .priv_class = &msmpeg4v2_class,
4880 };
4881 
4882 static const AVClass msmpeg4v3_class = {
4883  .class_name = "msmpeg4v3 encoder",
4884  .item_name = av_default_item_name,
4885  .option = ff_mpv_generic_options,
4886  .version = LIBAVUTIL_VERSION_INT,
4887 };
4888 
4890  .name = "msmpeg4",
4891  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4892  .type = AVMEDIA_TYPE_VIDEO,
4893  .id = AV_CODEC_ID_MSMPEG4V3,
4894  .priv_data_size = sizeof(MpegEncContext),
4896  .encode2 = ff_mpv_encode_picture,
4897  .close = ff_mpv_encode_end,
4899  .priv_class = &msmpeg4v3_class,
4900 };
4901 
4902 static const AVClass wmv1_class = {
4903  .class_name = "wmv1 encoder",
4904  .item_name = av_default_item_name,
4905  .option = ff_mpv_generic_options,
4906  .version = LIBAVUTIL_VERSION_INT,
4907 };
4908 
4910  .name = "wmv1",
4911  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4912  .type = AVMEDIA_TYPE_VIDEO,
4913  .id = AV_CODEC_ID_WMV1,
4914  .priv_data_size = sizeof(MpegEncContext),
4916  .encode2 = ff_mpv_encode_picture,
4917  .close = ff_mpv_encode_end,
4919  .priv_class = &wmv1_class,
4920 };
int last_time_base
Definition: mpegvideo.h:386
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:895
int plane
Definition: avisynth_c.h:422
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2899
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1009
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
int chroma_elim_threshold
Definition: mpegvideo.h:114
#define INPLACE_OFFSET
Definition: mpegutils.h:123
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:227
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:1685
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:312
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:571
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2739
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1510
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:124
const char * s
Definition: avisynth_c.h:768
#define RECON_SHIFT
attribute_deprecated int intra_quant_bias
Definition: avcodec.h:2205
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:109
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:271
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:519
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1002
int esc3_level_length
Definition: mpegvideo.h:437
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2266
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:385
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:3108
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:245
#define FF_CMP_DCTMAX
Definition: avcodec.h:2126
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:2241
AVOption.
Definition: opt.h:245
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:646
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:150
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:905
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:185
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3015
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:874
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:857
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:570
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:555
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:206
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2645
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:116
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1741
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
else temp
Definition: vf_mcdeint.c:259
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2802
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:367
const char * g
Definition: vf_curves.c:112
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:101
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:151
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:546
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:1316
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1962
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:191
MJPEG encoder.
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:129
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:2767
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:604
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2413
#define me
int frame_skip_cmp
Definition: mpegvideo.h:563
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:435
int b_frame_strategy
Definition: mpegvideo.h:556
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:114
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: avcodec.h:1602
attribute_deprecated int lmax
Definition: avcodec.h:2749
enum AVCodecID codec_id
Definition: mpegvideo.h:109
const char * b
Definition: vf_curves.c:113
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:742
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:65
int av_log2(unsigned v)
Definition: intmath.c:26
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:49
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:122
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2087
int frame_skip_exp
Definition: mpegvideo.h:562
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1904
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:574
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:251
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:120
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:2112
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:525
uint8_t permutated[64]
Definition: idctdsp.h:31
static const AVClass h263_class
uint8_t run
Definition: svq3.c:206
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3077
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:409
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:358
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:130
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define FF_LAMBDA_SHIFT
Definition: avutil.h:219
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:232
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1410
AVCodec.
Definition: avcodec.h:3600
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:387
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:201
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:84
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:247
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:309
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:1321
int chroma_x_shift
Definition: mpegvideo.h:475
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:111
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:513
int field_select[2][2]
Definition: mpegvideo.h:277
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:517
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:2763
attribute_deprecated int me_method
This option does nothing.
Definition: avcodec.h:1911
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
int quant_precision
Definition: mpegvideo.h:398
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2431
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:515
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1813
int modified_quant
Definition: mpegvideo.h:379
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:573
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:217
float rc_buffer_aggressivity
Definition: mpegvideo.h:536
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:27
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:1341
static int16_t block[64]
Definition: dct.c:113
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
attribute_deprecated int mv_bits
Definition: avcodec.h:2819
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:107
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:2048
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:125
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:407
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:492
int64_t time
time of current frame
Definition: mpegvideo.h:388
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1749
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:199
uint8_t bits
Definition: crc.c:296
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2668
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2690
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:134
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:108
void(* get_pixels)(int16_t *block, const uint8_t *pixels, ptrdiff_t line_size)
Definition: pixblockdsp.h:27
AVOptions.
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:523
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:407
enum OutputFormat out_format
output format
Definition: mpegvideo.h:101
attribute_deprecated int i_count
Definition: avcodec.h:2827
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:117
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:566
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:204
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
uint16_t * chroma_intra_matrix
custom intra quantization matrix Code outside libavcodec should access this field using av_codec_g/se...
Definition: avcodec.h:3482
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:2163
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:560
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:2122
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:878
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:455
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:383
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1791
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:118
int interlaced_dct
Definition: mpegvideo.h:480
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:2094
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2720
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:70
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:177
#define CHROMA_420
Definition: mpegvideo.h:472
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:441
int intra_dc_precision
Definition: mpegvideo.h:460
int repeat_first_field
Definition: mpegvideo.h:469
static AVFrame * frame
quarterpel DSP functions
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:248
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1601
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:75
#define ff_dlog(a,...)
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:390
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:1080
const uint8_t * scantable
Definition: idctdsp.h:30
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:330
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:126
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:1332
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:2027
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2845
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:861
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:572
int scenechange_threshold
Definition: mpegvideo.h:565
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:4067
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:993
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:310
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:2755
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:207
int flipflop_rounding
Definition: mpegvideo.h:434
#define CHROMA_444
Definition: mpegvideo.h:474
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:448
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:730
#define ff_sqrt
Definition: mathops.h:215
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2802
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2898
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1633
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:744
attribute_deprecated int skip_count
Definition: avcodec.h:2831
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:575
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:99
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:184
enum AVCodecID id
Definition: avcodec.h:3614
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:106
H263DSPContext h263dsp
Definition: mpegvideo.h:234
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:212
int width
width and height of the video frame
Definition: frame.h:236
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:227
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1998
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:182
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2693
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:837
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:251
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:192
int chroma_y_shift
Definition: mpegvideo.h:476
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:115
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:403
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:84
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:2100
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:3333
int qmax
maximum quantizer
Definition: avcodec.h:2626
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2294
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:42
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:220
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
ERContext er
Definition: mpegvideo.h:550
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:216
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:111
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:511
PixblockDSPContext pdsp
Definition: mpegvideo.h:231
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:505
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1771
uint8_t * buf
Definition: put_bits.h:38
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
GLsizei GLsizei * length
Definition: opengl_enc.c:115
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:230
attribute_deprecated int inter_quant_bias
Definition: avcodec.h:2211
const char * name
Name of the codec implementation.
Definition: avcodec.h:3607
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:399
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:519
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:404
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:254
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1115
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:83
int64_t mb_var_sum_temp
Definition: motion_est.h:86
attribute_deprecated int b_sensitivity
Definition: avcodec.h:2384
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1607
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2653
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:123
int * lambda_table
Definition: mpegvideo.h:205
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:2287
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:2683
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:82
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:473
float border_masking
Definition: mpegvideo.h:537
int progressive_frame
Definition: mpegvideo.h:478
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:833
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:293
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:450
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:110
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:74
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:891
int me_method
ME algorithm.
Definition: mpegvideo.h:256
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:171
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:1863
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:181
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:2265
int alternate_scan
Definition: mpegvideo.h:467
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2701
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:865
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:753
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:849
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:2759
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:2240
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:298
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:440
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:83
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:561
int n
Definition: avisynth_c.h:684
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:94
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:2239
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2640
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:195
#define src
Definition: vp9dsp.c:530
#define MAX_B_FRAMES
Definition: mpegvideo.h:63
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:196
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:261
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3107
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:460
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:82
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1026
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:2823
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:433
int frame_pred_frame_dct
Definition: mpegvideo.h:461
attribute_deprecated int misc_bits
Definition: avcodec.h:2833
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:1311
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:289
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:220
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:389
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:73
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:267
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:204
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:1107
void ff_faandct(int16_t *data)
Definition: faandct.c:122
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:2003
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:107
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:2261
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:131
enum AVCodecID codec_id
Definition: avcodec.h:1693
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:72
attribute_deprecated int prediction_method
Definition: avcodec.h:2067
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:89
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:1982
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:440
#define START_TIMER
Definition: timer.h:94
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
main external API structure.
Definition: avcodec.h:1676
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:228
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:567
ScanTable intra_scantable
Definition: mpegvideo.h:88
int qmin
minimum quantizer
Definition: avcodec.h:2619
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:97
#define FF_CMP_NSSE
Definition: avcodec.h:2123
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:2206
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:141
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:137
FDCTDSPContext fdsp
Definition: mpegvideo.h:224
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:2041
float rc_qmod_amp
Definition: mpegvideo.h:533
int luma_elim_threshold
Definition: mpegvideo.h:113
attribute_deprecated int header_bits
Definition: avcodec.h:2821
GLint GLenum type
Definition: opengl_enc.c:105
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1671
Picture * picture
main picture buffer
Definition: mpegvideo.h:133
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:402
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:453
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2249
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:252
int(* pix_norm1)(uint8_t *pix, int line_size)
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:111
attribute_deprecated int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:1954
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:2242
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:295
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:122
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:132
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:2342
#define STRIDE_ALIGN
Definition: internal.h:82
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:116
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1241
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1722
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:119
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:235
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1081
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:121
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:2825
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1506
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:2256
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:112
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:209
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:449
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:275
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:102
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:249
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:493
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:2055
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:2034
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:253
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:250
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:845
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:186
uint8_t level
Definition: svq3.c:207
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:246
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:128
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:539
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:78
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:180
char * rc_eq
Definition: mpegvideo.h:541
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:95
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1889
PutBitContext pb
bit output
Definition: mpegvideo.h:148
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:291
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
volatile int error_count
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:2106
int quantizer_noise_shaping
Definition: mpegvideo.h:526
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MECmpContext mecc
Definition: mpegvideo.h:228
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
float rc_initial_cplx
Definition: mpegvideo.h:535
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:127
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:282
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:112
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:2808
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:211
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:206
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:2308
static int score_tab[256]
Definition: zmbvenc.c:59
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:159
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:179
Bi-dir predicted.
Definition: avutil.h:270
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:76
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:532
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideo.h:145
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3098
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:326
#define H263_GOB_HEIGHT(h)
Definition: h263.h:44
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2643
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:187
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:455
int trellis
trellis RD quantization
Definition: avcodec.h:2775
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:507
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:4081
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:734
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:853
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:421
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:106
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:498
#define STOP_TIMER(id)
Definition: timer.h:95
int slices
Number of slices.
Definition: avcodec.h:2429
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1718
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:84
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:877
void(* diff_pixels)(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride)
Definition: pixblockdsp.h:30
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:457
int dia_size
ME diamond size & shape.
Definition: avcodec.h:2136
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:2837
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:3147
VideoDSPContext vdsp
Definition: mpegvideo.h:233
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:2330
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:1326
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1618
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1726
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:497
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:179
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:100
This side data corresponds to the AVCPBProperties struct.
Definition: avcodec.h:1462
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:406
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:165
attribute_deprecated int p_count
Definition: avcodec.h:2829
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
attribute_deprecated int error_rate
Definition: avcodec.h:3320
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:221
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1720
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:135
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:150
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1600
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:509
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:634
int height
Definition: frame.h:236
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:522
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:121
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 start
Definition: avisynth_c.h:690
#define av_always_inline
Definition: attributes.h:39
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:487
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:896
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:81
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
attribute_deprecated int lmin
Definition: avcodec.h:2743
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:113
#define stride
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:317
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:521
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:236
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:381
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1578
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
int delay
Codec delay.
Definition: avcodec.h:1846
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2894
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1594
int ff_check_alignment(void)
Definition: me_cmp.c:988
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:589
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:139
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:3777
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:242
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:269
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:203
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2676
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:391
enum idct_permutation_type perm_type
Definition: idctdsp.h:95
attribute_deprecated int pre_me
Definition: avcodec.h:2148
HpelDSPContext hdsp
Definition: mpegvideo.h:226
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340