FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/mem_internal.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/thread.h"
46 #include "avcodec.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12codecs.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MpegEncContext *s, const AVPacket *pkt);
88 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MpegEncContext *s);
90 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
91 static int dct_quantize_c(MpegEncContext *s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
97 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
98 
99 static const AVOption mpv_generic_options[] = {
102  { NULL },
103 };
104 
106  .class_name = "generic mpegvideo encoder",
107  .item_name = av_default_item_name,
108  .option = mpv_generic_options,
109  .version = LIBAVUTIL_VERSION_INT,
110 };
111 
/**
 * Precompute the quantization multiplier tables for a range of qscales.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale] (32-bit
 * reciprocal multipliers, QMAT_SHIFT fixed point) and, for the generic
 * DCT path, qmat16[qscale] (16-bit multiplier in [0] and a rounding
 * bias in [1], QMAT_SHIFT_MMX fixed point) from the given quant_matrix.
 * Which scaling is applied depends on which forward-DCT implementation
 * is in use (islow/faandct keep unscaled coefficients, ifast bakes in
 * the AAN scales).
 *
 * @param s            encoder context (supplies fdct pointer, IDCT permutation,
 *                     q_scale_type and the logging context)
 * @param qmat         out: per-qscale 64-entry 32-bit multiplier tables
 * @param qmat16       out: per-qscale 16-bit multiplier/bias tables
 *                     (only written on the generic path)
 * @param quant_matrix input quantization matrix, natural order
 * @param bias         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param qmin         first qscale to compute (inclusive)
 * @param qmax         last qscale to compute (inclusive)
 * @param intra        1 to skip the DC coefficient in the overflow scan
 */
112 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
113  uint16_t (*qmat16)[2][64],
114  const uint16_t *quant_matrix,
115  int bias, int qmin, int qmax, int intra)
116 {
117  FDCTDSPContext *fdsp = &s->fdsp;
118  int qscale;
119  int shift = 0;
120 
121  for (qscale = qmin; qscale <= qmax; qscale++) {
122  int i;
123  int qscale2;
124 
 /* qscale2 is the doubled/mapped quantizer: MPEG-2's non-linear table
  * when q_scale_type is set, otherwise simply 2*qscale. */
125  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
126  else qscale2 = qscale << 1;
127 
128  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
129 #if CONFIG_FAANDCT
130  fdsp->fdct == ff_faandct ||
131 #endif /* CONFIG_FAANDCT */
 /* NOTE(review): one source line appears elided here in this extraction
  * (the remaining condition term and the opening brace of this branch) —
  * confirm against the original file. */
133  for (i = 0; i < 64; i++) {
134  const int j = s->idsp.idct_permutation[i];
135  int64_t den = (int64_t) qscale2 * quant_matrix[j];
136  /* 16 <= qscale * quant_matrix[i] <= 7905
137  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
138  * 19952 <= x <= 249205026
139  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
140  * 3444240 >= (1 << 36) / (x) >= 275 */
141 
142  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143  }
144  } else if (fdsp->fdct == ff_fdct_ifast) {
 /* ifast DCT output is pre-scaled by ff_aanscales (14-bit fixed point),
  * so the reciprocal must fold that scale (hence the extra +14 shift). */
145  for (i = 0; i < 64; i++) {
146  const int j = s->idsp.idct_permutation[i];
147  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
148  /* 16 <= qscale * quant_matrix[i] <= 7905
149  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
150  * 19952 <= x <= 249205026
151  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
152  * 3444240 >= (1 << 36) / (x) >= 275 */
153 
154  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
155  }
156  } else {
 /* Generic path: also build the 16-bit tables used by SIMD quantizers. */
157  for (i = 0; i < 64; i++) {
158  const int j = s->idsp.idct_permutation[i];
159  int64_t den = (int64_t) qscale2 * quant_matrix[j];
160  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
161  * Assume x = qscale * quant_matrix[i]
162  * So 16 <= x <= 7905
163  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
164  * so 32768 >= (1 << 19) / (x) >= 67 */
165  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
166  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
167  // (qscale * quant_matrix[i]);
168  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
169 
 /* Clamp degenerate multipliers (0 or exactly 2^15) so the 16-bit
  * SIMD multiply neither divides by zero nor overflows. */
170  if (qmat16[qscale][0][i] == 0 ||
171  qmat16[qscale][0][i] == 128 * 256)
172  qmat16[qscale][0][i] = 128 * 256 - 1;
173  qmat16[qscale][1][i] =
174  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
175  qmat16[qscale][0][i]);
176  }
177  }
178 
 /* Find the smallest extra shift for which max_coeff * multiplier
  * still fits in an int; 8191 is the largest DCT coefficient magnitude. */
179  for (i = intra; i < 64; i++) {
180  int64_t max = 8191;
181  if (fdsp->fdct == ff_fdct_ifast) {
182  max = (8191LL * ff_aanscales[i]) >> 14;
183  }
184  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
185  shift++;
186  }
187  }
188  }
 /* A non-zero shift means intermediate products may not fit in 32 bits;
  * only warn, the encoder keeps running. */
189  if (shift) {
190  av_log(s->avctx, AV_LOG_INFO,
191  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
192  QMAT_SHIFT - shift);
193  }
194 }
195 
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 *
 * The linear path maps lambda to qscale with the empirical factor 139/2^7
 * and clips to [qmin, qmax] (qmax is relaxed to 31 while VBV is ignoring
 * qmax). s->lambda2 is refreshed as the rounded square of lambda.
 */
196 static inline void update_qscale(MpegEncContext *s)
197 {
 /* NOTE(review): the "&& 0" makes this non-linear-qscale branch dead
  * code — only the else branch ever runs. Presumably disabled on
  * purpose; confirm before re-enabling. */
198  if (s->q_scale_type == 1 && 0) {
199  int i;
200  int bestdiff=INT_MAX;
201  int best = 1;
202 
 /* Pick the non-linear qscale index whose effective quantizer is
  * closest to the one implied by lambda, honoring qmin/qmax. */
203  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
204  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
205  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
206  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
207  continue;
208  if (diff < bestdiff) {
209  bestdiff = diff;
210  best = i;
211  }
212  }
213  s->qscale = best;
214  } else {
 /* Linear mapping: qscale ≈ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7),
  * with rounding via the FF_LAMBDA_SCALE * 64 term. */
215  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
216  (FF_LAMBDA_SHIFT + 7);
217  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
218  }
219 
 /* NOTE(review): the shift amount of this statement (original line 221)
  * appears elided in this extraction — confirm against the original. */
220  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
222 }
223 
225 {
226  int i;
227 
228  if (matrix) {
229  put_bits(pb, 1, 1);
230  for (i = 0; i < 64; i++) {
232  }
233  } else
234  put_bits(pb, 1, 0);
235 }
236 
237 /**
238  * init s->cur_pic.qscale_table from s->lambda_table
239  */
241 {
242  int8_t * const qscale_table = s->cur_pic.qscale_table;
243  int i;
244 
245  for (i = 0; i < s->mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
249  s->avctx->qmax);
250  }
251 }
252 
254  const MpegEncContext *src)
255 {
256 #define COPY(a) dst->a= src->a
257  COPY(pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
269 static void mpv_encode_init_static(void)
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MpegEncContext to defaults for encoding.
277  * the changed fields will not depend upon the prior state of the MpegEncContext.
278  */
280 {
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
285  ff_thread_once(&init_static_once, mpv_encode_init_static);
286 
287  s->me.mv_penalty = default_mv_penalty;
288  s->fcode_tab = default_fcode_tab;
289 
290  s->input_picture_number = 0;
291  s->picture_in_gop_number = 0;
292 }
293 
295 {
296  s->dct_quantize = dct_quantize_c;
297  s->denoise_dct = denoise_dct_c;
298 
299 #if ARCH_MIPS
301 #elif ARCH_X86
303 #endif
304 
305  if (s->avctx->trellis)
306  s->dct_quantize = dct_quantize_trellis_c;
307 }
308 
310 {
311  MECmpContext mecc;
312  me_cmp_func me_cmp[6];
313  int ret;
314 
315  ff_me_cmp_init(&mecc, avctx);
316  ret = ff_me_init(&s->me, avctx, &mecc, 1);
317  if (ret < 0)
318  return ret;
319  ret = ff_set_cmp(&mecc, me_cmp, s->frame_skip_cmp, 1);
320  if (ret < 0)
321  return ret;
322  s->frame_skip_cmp_fn = me_cmp[1];
324  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
325  if (ret < 0)
326  return ret;
327  if (!me_cmp[0] || !me_cmp[4])
328  return AVERROR(EINVAL);
329  s->ildct_cmp[0] = me_cmp[0];
330  s->ildct_cmp[1] = me_cmp[4];
331  }
332 
333  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
334 
335  s->sse_cmp[0] = mecc.sse[0];
336  s->sse_cmp[1] = mecc.sse[1];
337  s->sad_cmp[0] = mecc.sad[0];
338  s->sad_cmp[1] = mecc.sad[1];
339  if (avctx->mb_cmp == FF_CMP_NSSE) {
340  s->n_sse_cmp[0] = mecc.nsse[0];
341  s->n_sse_cmp[1] = mecc.nsse[1];
342  } else {
343  s->n_sse_cmp[0] = mecc.sse[0];
344  s->n_sse_cmp[1] = mecc.sse[1];
345  }
346 
347  return 0;
348 }
349 
350 /* init video encoder */
352 {
354  AVCPBProperties *cpb_props;
355  int i, ret;
356  int mb_array_size, mv_table_size;
357 
359 
360  switch (avctx->pix_fmt) {
361  case AV_PIX_FMT_YUVJ444P:
362  case AV_PIX_FMT_YUV444P:
363  s->chroma_format = CHROMA_444;
364  break;
365  case AV_PIX_FMT_YUVJ422P:
366  case AV_PIX_FMT_YUV422P:
367  s->chroma_format = CHROMA_422;
368  break;
369  case AV_PIX_FMT_YUVJ420P:
370  case AV_PIX_FMT_YUV420P:
371  default:
372  s->chroma_format = CHROMA_420;
373  break;
374  }
375 
377 
378  s->bit_rate = avctx->bit_rate;
379  s->width = avctx->width;
380  s->height = avctx->height;
381  if (avctx->gop_size > 600 &&
384  "keyframe interval too large!, reducing it from %d to %d\n",
385  avctx->gop_size, 600);
386  avctx->gop_size = 600;
387  }
388  s->gop_size = avctx->gop_size;
389  s->avctx = avctx;
391  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
392  "is %d.\n", MAX_B_FRAMES);
394  } else if (avctx->max_b_frames < 0) {
396  "max b frames must be 0 or positive for mpegvideo based encoders\n");
397  return AVERROR(EINVAL);
398  }
399  s->max_b_frames = avctx->max_b_frames;
400  s->codec_id = avctx->codec->id;
401  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
402  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
403  return AVERROR(EINVAL);
404  }
405 
406  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
407  s->rtp_mode = !!s->rtp_payload_size;
408  s->intra_dc_precision = avctx->intra_dc_precision;
409 
410  // workaround some differences between how applications specify dc precision
411  if (s->intra_dc_precision < 0) {
412  s->intra_dc_precision += 8;
413  } else if (s->intra_dc_precision >= 8)
414  s->intra_dc_precision -= 8;
415 
416  if (s->intra_dc_precision < 0) {
418  "intra dc precision must be positive, note some applications use"
419  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
420  return AVERROR(EINVAL);
421  }
422 
423  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
424  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
425  return AVERROR(EINVAL);
426  }
427  s->user_specified_pts = AV_NOPTS_VALUE;
428 
429  if (s->gop_size <= 1) {
430  s->intra_only = 1;
431  s->gop_size = 12;
432  } else {
433  s->intra_only = 0;
434  }
435 
436  /* Fixed QSCALE */
437  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
438 
439  s->adaptive_quant = (avctx->lumi_masking ||
440  avctx->dark_masking ||
443  avctx->p_masking ||
444  s->border_masking ||
445  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
446  !s->fixed_qscale;
447 
448  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
449 
451  switch(avctx->codec_id) {
454  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
455  break;
456  case AV_CODEC_ID_MPEG4:
460  if (avctx->rc_max_rate >= 15000000) {
461  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
462  } else if(avctx->rc_max_rate >= 2000000) {
463  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
464  } else if(avctx->rc_max_rate >= 384000) {
465  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
466  } else
467  avctx->rc_buffer_size = 40;
468  avctx->rc_buffer_size *= 16384;
469  break;
470  }
471  if (avctx->rc_buffer_size) {
472  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
473  }
474  }
475 
476  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
477  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
478  return AVERROR(EINVAL);
479  }
480 
483  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
484  }
485 
487  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
488  return AVERROR(EINVAL);
489  }
490 
492  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
493  return AVERROR(EINVAL);
494  }
495 
496  if (avctx->rc_max_rate &&
500  "impossible bitrate constraints, this will fail\n");
501  }
502 
503  if (avctx->rc_buffer_size &&
506  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
507  return AVERROR(EINVAL);
508  }
509 
510  if (!s->fixed_qscale &&
513  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
515  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
516  if (nbt <= INT_MAX) {
517  avctx->bit_rate_tolerance = nbt;
518  } else
519  avctx->bit_rate_tolerance = INT_MAX;
520  }
521 
522  if (avctx->rc_max_rate &&
524  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
525  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
526  90000LL * (avctx->rc_buffer_size - 1) >
527  avctx->rc_max_rate * 0xFFFFLL) {
529  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
530  "specified vbv buffer is too large for the given bitrate!\n");
531  }
532 
533  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
534  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
535  s->codec_id != AV_CODEC_ID_FLV1) {
536  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
537  return AVERROR(EINVAL);
538  }
539 
540  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
542  "OBMC is only supported with simple mb decision\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
547  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
548  return AVERROR(EINVAL);
549  }
550 
551  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
552  s->codec_id == AV_CODEC_ID_H263 ||
553  s->codec_id == AV_CODEC_ID_H263P) &&
554  (avctx->sample_aspect_ratio.num > 255 ||
555  avctx->sample_aspect_ratio.den > 255)) {
557  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
561  }
562 
563  if ((s->codec_id == AV_CODEC_ID_H263 ||
564  s->codec_id == AV_CODEC_ID_H263P) &&
565  (avctx->width > 2048 ||
566  avctx->height > 1152 )) {
567  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
568  return AVERROR(EINVAL);
569  }
570  if (s->codec_id == AV_CODEC_ID_FLV1 &&
571  (avctx->width > 65535 ||
572  avctx->height > 65535 )) {
573  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
574  return AVERROR(EINVAL);
575  }
576  if ((s->codec_id == AV_CODEC_ID_H263 ||
577  s->codec_id == AV_CODEC_ID_H263P ||
578  s->codec_id == AV_CODEC_ID_RV20) &&
579  ((avctx->width &3) ||
580  (avctx->height&3) )) {
581  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
582  return AVERROR(EINVAL);
583  }
584 
585  if (s->codec_id == AV_CODEC_ID_RV10 &&
586  (avctx->width &15 ||
587  avctx->height&15 )) {
588  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
589  return AVERROR(EINVAL);
590  }
591 
592  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
593  s->codec_id == AV_CODEC_ID_WMV2) &&
594  avctx->width & 1) {
595  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
596  return AVERROR(EINVAL);
597  }
598 
600  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
601  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
602  return AVERROR(EINVAL);
603  }
604 
605  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
606  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
607  return AVERROR(EINVAL);
608  }
609 
610  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
612  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
613  return AVERROR(EINVAL);
614  }
615 
616  if (s->scenechange_threshold < 1000000000 &&
619  "closed gop with scene change detection are not supported yet, "
620  "set threshold to 1000000000\n");
621  return AVERROR_PATCHWELCOME;
622  }
623 
625  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
628  "low delay forcing is only available for mpeg2, "
629  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
630  return AVERROR(EINVAL);
631  }
632  if (s->max_b_frames != 0) {
634  "B-frames cannot be used with low delay\n");
635  return AVERROR(EINVAL);
636  }
637  }
638 
639  if (s->q_scale_type == 1) {
640  if (avctx->qmax > 28) {
642  "non linear quant only supports qmax <= 28 currently\n");
643  return AVERROR_PATCHWELCOME;
644  }
645  }
646 
647  if (avctx->slices > 1 &&
649  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
650  return AVERROR(EINVAL);
651  }
652 
653  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
655  "notice: b_frame_strategy only affects the first pass\n");
656  s->b_frame_strategy = 0;
657  }
658 
660  if (i > 1) {
661  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
662  avctx->time_base.den /= i;
663  avctx->time_base.num /= i;
664  //return -1;
665  }
666 
667  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
668  // (a + x * 3 / 8) / x
669  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
670  s->inter_quant_bias = 0;
671  } else {
672  s->intra_quant_bias = 0;
673  // (a - x / 4) / x
674  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
675  }
676 
677  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
678  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
679  return AVERROR(EINVAL);
680  }
681 
682  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
683 
684  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
685  avctx->time_base.den > (1 << 16) - 1) {
687  "timebase %d/%d not supported by MPEG 4 standard, "
688  "the maximum admitted value for the timebase denominator "
689  "is %d\n", avctx->time_base.num, avctx->time_base.den,
690  (1 << 16) - 1);
691  return AVERROR(EINVAL);
692  }
693  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
694 
695  switch (avctx->codec->id) {
696 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
698  s->rtp_mode = 1;
699  /* fallthrough */
701  s->out_format = FMT_MPEG1;
702  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
703  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
705  break;
706 #endif
707 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
708  case AV_CODEC_ID_MJPEG:
709  case AV_CODEC_ID_AMV:
710  s->out_format = FMT_MJPEG;
711  s->intra_only = 1; /* force intra only for jpeg */
712  if ((ret = ff_mjpeg_encode_init(s)) < 0)
713  return ret;
714  avctx->delay = 0;
715  s->low_delay = 1;
716  break;
717 #endif
718  case AV_CODEC_ID_SPEEDHQ:
719  s->out_format = FMT_SPEEDHQ;
720  s->intra_only = 1; /* force intra only for SHQ */
721  if (!CONFIG_SPEEDHQ_ENCODER)
723  if ((ret = ff_speedhq_encode_init(s)) < 0)
724  return ret;
725  avctx->delay = 0;
726  s->low_delay = 1;
727  break;
728  case AV_CODEC_ID_H261:
729  if (!CONFIG_H261_ENCODER)
732  if (ret < 0)
733  return ret;
734  s->out_format = FMT_H261;
735  avctx->delay = 0;
736  s->low_delay = 1;
737  s->rtp_mode = 0; /* Sliced encoding not supported */
738  break;
739  case AV_CODEC_ID_H263:
740  if (!CONFIG_H263_ENCODER)
743  s->width, s->height) == 8) {
745  "The specified picture size of %dx%d is not valid for "
746  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
747  "352x288, 704x576, and 1408x1152. "
748  "Try H.263+.\n", s->width, s->height);
749  return AVERROR(EINVAL);
750  }
751  s->out_format = FMT_H263;
752  avctx->delay = 0;
753  s->low_delay = 1;
754  break;
755  case AV_CODEC_ID_H263P:
756  s->out_format = FMT_H263;
757  s->h263_plus = 1;
758  /* Fx */
759  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
760  s->modified_quant = s->h263_aic;
761  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
762  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
763  s->flipflop_rounding = 1;
764 
765  /* /Fx */
766  /* These are just to be sure */
767  avctx->delay = 0;
768  s->low_delay = 1;
769  break;
770  case AV_CODEC_ID_FLV1:
771  s->out_format = FMT_H263;
772  s->h263_flv = 2; /* format = 1; 11-bit codes */
773  s->unrestricted_mv = 1;
774  s->rtp_mode = 0; /* don't allow GOB */
775  avctx->delay = 0;
776  s->low_delay = 1;
777  break;
778  case AV_CODEC_ID_RV10:
779  s->out_format = FMT_H263;
780  avctx->delay = 0;
781  s->low_delay = 1;
782  break;
783  case AV_CODEC_ID_RV20:
784  s->out_format = FMT_H263;
785  avctx->delay = 0;
786  s->low_delay = 1;
787  s->modified_quant = 1;
788  s->h263_aic = 1;
789  s->h263_plus = 1;
790  s->loop_filter = 1;
791  s->unrestricted_mv = 0;
792  break;
793  case AV_CODEC_ID_MPEG4:
794  s->out_format = FMT_H263;
795  s->h263_pred = 1;
796  s->unrestricted_mv = 1;
797  s->flipflop_rounding = 1;
798  s->low_delay = s->max_b_frames ? 0 : 1;
799  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
800  break;
802  s->out_format = FMT_H263;
803  s->h263_pred = 1;
804  s->unrestricted_mv = 1;
805  s->msmpeg4_version = MSMP4_V2;
806  avctx->delay = 0;
807  s->low_delay = 1;
808  break;
810  s->out_format = FMT_H263;
811  s->h263_pred = 1;
812  s->unrestricted_mv = 1;
813  s->msmpeg4_version = MSMP4_V3;
814  s->flipflop_rounding = 1;
815  avctx->delay = 0;
816  s->low_delay = 1;
817  break;
818  case AV_CODEC_ID_WMV1:
819  s->out_format = FMT_H263;
820  s->h263_pred = 1;
821  s->unrestricted_mv = 1;
822  s->msmpeg4_version = MSMP4_WMV1;
823  s->flipflop_rounding = 1;
824  avctx->delay = 0;
825  s->low_delay = 1;
826  break;
827  case AV_CODEC_ID_WMV2:
828  s->out_format = FMT_H263;
829  s->h263_pred = 1;
830  s->unrestricted_mv = 1;
831  s->msmpeg4_version = MSMP4_WMV2;
832  s->flipflop_rounding = 1;
833  avctx->delay = 0;
834  s->low_delay = 1;
835  break;
836  default:
837  return AVERROR(EINVAL);
838  }
839 
840  avctx->has_b_frames = !s->low_delay;
841 
842  s->encoding = 1;
843 
844  s->progressive_frame =
845  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
847  s->alternate_scan);
848 
849  if (s->lmin > s->lmax) {
850  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
851  s->lmin = s->lmax;
852  }
853 
854  /* init */
856  if ((ret = ff_mpv_common_init(s)) < 0)
857  return ret;
858 
859  ff_fdctdsp_init(&s->fdsp, avctx);
860  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
861  ff_pixblockdsp_init(&s->pdsp, avctx);
862  ret = me_cmp_init(s, avctx);
863  if (ret < 0)
864  return ret;
865 
866  if (!(avctx->stats_out = av_mallocz(256)) ||
867  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
868  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
869  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
870  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
871  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
872  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
873  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
874  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
875  !(s->new_pic = av_frame_alloc()) ||
876  !(s->picture_pool = ff_mpv_alloc_pic_pool(0)))
877  return AVERROR(ENOMEM);
878 
879  /* Allocate MV tables; the MV and MB tables will be copied
880  * to slice contexts by ff_update_duplicate_context(). */
881  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
882  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
883  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
884  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
885  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
886  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
887  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
888  return AVERROR(ENOMEM);
889  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
890  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
891  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
892  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
893  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
894  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
895 
896  /* Allocate MB type table */
897  mb_array_size = s->mb_stride * s->mb_height;
898  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
899  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
900  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
901  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
902  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
903  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
904  !(s->mb_mean = av_mallocz(mb_array_size)))
905  return AVERROR(ENOMEM);
906 
907 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
908  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
909  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
910  int16_t (*tmp1)[2];
911  uint8_t *tmp2;
912  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
913  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
914  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
915  return AVERROR(ENOMEM);
916 
917  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
918  tmp1 += s->mb_stride + 1;
919 
920  for (int i = 0; i < 2; i++) {
921  for (int j = 0; j < 2; j++) {
922  for (int k = 0; k < 2; k++) {
923  s->b_field_mv_table[i][j][k] = tmp1;
924  tmp1 += mv_table_size;
925  }
926  s->b_field_select_table[i][j] = tmp2;
927  tmp2 += 2 * mv_table_size;
928  }
929  }
930  }
931 
932  if (s->noise_reduction) {
933  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
934  return AVERROR(ENOMEM);
935  }
936 
938 
939  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
940  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
941  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
942  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
943  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
944  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
945  } else {
946  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
947  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
948  }
949 
950  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
951  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
952 
953  if (s->slice_context_count > 1) {
954  s->rtp_mode = 1;
955 
957  s->h263_slice_structured = 1;
958  }
959 
960  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
962 #if CONFIG_MSMPEG4ENC
963  if (s->msmpeg4_version != MSMP4_UNUSED)
965 #endif
966  }
967 
968  /* init q matrix */
969  for (i = 0; i < 64; i++) {
970  int j = s->idsp.idct_permutation[i];
971  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
972  s->mpeg_quant) {
973  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
974  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
975  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
976  s->intra_matrix[j] =
977  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
978  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
979  s->intra_matrix[j] =
980  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
981  } else {
982  /* MPEG-1/2 */
983  s->chroma_intra_matrix[j] =
984  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
985  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
986  }
987  if (avctx->intra_matrix)
988  s->intra_matrix[j] = avctx->intra_matrix[i];
989  if (avctx->inter_matrix)
990  s->inter_matrix[j] = avctx->inter_matrix[i];
991  }
992 
993  /* precompute matrix */
994  /* for mjpeg, we do include qscale in the matrix */
995  if (s->out_format != FMT_MJPEG) {
996  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
997  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
998  31, 1);
999  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1000  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1001  31, 0);
1002  }
1003 
1004  if ((ret = ff_rate_control_init(s)) < 0)
1005  return ret;
1006 
1007  if (s->b_frame_strategy == 2) {
1008  for (i = 0; i < s->max_b_frames + 2; i++) {
1009  s->tmp_frames[i] = av_frame_alloc();
1010  if (!s->tmp_frames[i])
1011  return AVERROR(ENOMEM);
1012 
1013  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1014  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1015  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1016 
1017  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1018  if (ret < 0)
1019  return ret;
1020  }
1021  }
1022 
1023  cpb_props = ff_encode_add_cpb_side_data(avctx);
1024  if (!cpb_props)
1025  return AVERROR(ENOMEM);
1026  cpb_props->max_bitrate = avctx->rc_max_rate;
1027  cpb_props->min_bitrate = avctx->rc_min_rate;
1028  cpb_props->avg_bitrate = avctx->bit_rate;
1029  cpb_props->buffer_size = avctx->rc_buffer_size;
1030 
1031  return 0;
1032 }
1033 
/* Free all encoder-side allocations of the MpegEncContext.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "av_cold int ff_mpv_encode_end(AVCodecContext *avctx)" with
 * "MpegEncContext *s = avctx->priv_data;" — confirm against the repository. */
{
    int i;

    ff_rate_control_uninit(&s->rc_context);

    /* NOTE(review): a statement was lost here in extraction (presumably
     * ff_mpv_common_end(s); — confirm upstream). */
    ff_refstruct_pool_uninit(&s->picture_pool);

    /* Drop any queued input / reordered pictures still held. */
    if (s->input_picture && s->reordered_input_picture) {
        for (int i = 0; i < MAX_B_FRAMES + 1; i++) {
            /* NOTE(review): this loop-local i shadows the outer i above. */
            ff_refstruct_unref(&s->input_picture[i]);
            ff_refstruct_unref(&s->reordered_input_picture[i]);
        }
    }
    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    av_frame_free(&s->new_pic);

    /* NOTE(review): a statement was lost here in extraction. */

    /* Motion-vector tables and per-MB metadata. */
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    av_freep(&s->b_field_mv_table_base);
    av_freep(&s->b_field_select_table[0][0]);
    av_freep(&s->p_field_select_table[0]);

    av_freep(&s->mb_type);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* The chroma matrices may alias the luma ones; free them only when they
     * are separate allocations, then clear the (possibly dangling) aliases. */
    if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
    if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
    s->q_chroma_intra_matrix= NULL;
    s->q_chroma_intra_matrix16= NULL;
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
    av_freep(&s->mb_var);
    av_freep(&s->mc_mb_var);
    av_freep(&s->mb_mean);

    return 0;
}
1090 
1091 #define IS_ENCODER 1
1093 
/* Optionally debug-dump the DCT coefficients of the current macroblock,
 * then reconstruct the macroblock into the current picture. */
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* Print in raster order by undoing the IDCT permutation. */
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* NOTE(review): the actual reconstruction call was lost in extraction;
     * presumably ff_mpv_reconstruct_mb(s, block) — confirm upstream. */
}
1110 
/* Sum of absolute errors of a 16x16 block of pixels against the constant
 * reference value ref. Used to estimate how well a block is represented by
 * its own mean (intra) versus a motion-compensated prediction. */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            total += diff < 0 ? -diff : diff;
        }
    }

    return total;
}
1124 
1125 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1126  const uint8_t *ref, int stride)
1127 {
1128  int x, y, w, h;
1129  int acc = 0;
1130 
1131  w = s->width & ~15;
1132  h = s->height & ~15;
1133 
1134  for (y = 0; y < h; y += 16) {
1135  for (x = 0; x < w; x += 16) {
1136  int offset = x + y * stride;
1137  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1138  stride, 16);
1139  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1140  int sae = get_sae(src + offset, mean, stride);
1141 
1142  acc += sae + 500 < sad;
1143  }
1144  }
1145  return acc;
1146 }
1147 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is allocated with an EDGE_WIDTH border on every side; the
 * border is then hidden from callers by offsetting data[] and restoring
 * the nominal width/height.
 */
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    f->width = avctx->width + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    /* NOTE(review): the allocation call itself was lost in extraction
     * (presumably ret = ff_encode_alloc_frame(avctx, f) — confirm upstream;
     * ret is otherwise used uninitialized here). */
    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->linesize, &s->uvlinesize);
    if (ret < 0)
        return ret;

    /* Skip past the edge border so data[] points at the visible picture;
     * chroma planes use the chroma shifts to scale the border. */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
        f->data[i] += offset;
    }
    f->width = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1183 
/* Accept one input frame (or NULL to flush) into the encoder's input queue.
 * Validates/derives the pts, decides whether the caller's buffers can be
 * referenced directly or must be copied into an edge-padded picture, and
 * shifts the s->input_picture[] queue. Returns 0 or a negative AVERROR. */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!s->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* pts must be strictly monotonic. */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* No pts supplied: continue from the last one, or fall back to
             * the display picture number. */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* Direct (zero-copy) referencing of the caller's frame is only
         * possible when strides and alignment match the encoder's layout. */
        if (pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        pic = ff_refstruct_pool_get(s->picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* Copy the input plane-by-plane into an edge-padded picture. */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                int h_shift = i ? s->chroma_x_shift : 0;
                int v_shift = i ? s->chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->width , h_shift);
                int h = AV_CEIL_RSHIFT(s->height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* Interlaced MPEG-2 aligns to 32 lines, so pad deeper when
                 * the 32-alignment gap exceeds one macroblock row. */
                if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->progressive_sequence
                    && FFALIGN(s->height, 32) - s->height > 16)
                    vpad = 32;

                if (!s->avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* Extend the bottom/right edges of partial macroblocks. */
                if ((s->width & 15) || (s->height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!s->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure s->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];
    for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i] = NULL;

    s->input_picture[encoding_delay] = pic;

    return 0;
fail:
    ff_refstruct_unref(&pic);
    return ret;
}
1323 
1324 static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
1325 {
1326  int x, y, plane;
1327  int score = 0;
1328  int64_t score64 = 0;
1329 
1330  for (plane = 0; plane < 3; plane++) {
1331  const int stride = p->f->linesize[plane];
1332  const int bw = plane ? 1 : 2;
1333  for (y = 0; y < s->mb_height * bw; y++) {
1334  for (x = 0; x < s->mb_width * bw; x++) {
1335  int off = p->shared ? 0 : 16;
1336  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1337  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1338  int v = s->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1339 
1340  switch (FFABS(s->frame_skip_exp)) {
1341  case 0: score = FFMAX(score, v); break;
1342  case 1: score += FFABS(v); break;
1343  case 2: score64 += v * (int64_t)v; break;
1344  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1345  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1346  }
1347  }
1348  }
1349  }
1350  emms_c();
1351 
1352  if (score)
1353  score64 = score;
1354  if (s->frame_skip_exp < 0)
1355  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1356  -1.0/s->frame_skip_exp);
1357 
1358  if (score64 < s->frame_skip_threshold)
1359  return 1;
1360  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1361  return 1;
1362  return 0;
1363 }
1364 
1366 {
1367  int ret;
1368  int size = 0;
1369 
1371  if (ret < 0)
1372  return ret;
1373 
1374  do {
1376  if (ret >= 0) {
1377  size += pkt->size;
1379  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1380  return ret;
1381  } while (ret >= 0);
1382 
1383  return size;
1384 }
1385 
/* Lookahead for b_frame_strategy == 2: encode the queued input pictures at
 * reduced resolution with every candidate B-frame count and pick the count
 * with the lowest rate-distortion cost. Returns the best count or a
 * negative AVERROR.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "static int estimate_best_b_count(MpegEncContext *s)". Several statements
 * below were also lost — marked where noticed; confirm against the repo. */
{
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
    /* NOTE(review): the shift operand was lost in extraction (upstream:
     * FF_LAMBDA_SHIFT;). */

    /* Downscale the next reference plus the queued inputs into tmp_frames[]
     * for the cheap lookahead encodes. */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
                                              s->next_pic.ptr;

        if (pre_input_ptr) {
            const uint8_t *data[4];
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input_ptr->shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input_ptr->f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input_ptr->f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input_ptr->f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* Try each candidate B-frame count j and measure its RD cost. */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* NOTE(review): allocation of the scratch context was lost in
         * extraction (presumably c = avcodec_alloc_context3(...)). */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width = width;
        c->height = height;
        /* NOTE(review): a flags assignment was lost in extraction here. */
        c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision = s->avctx->mb_decision;
        c->me_cmp = s->avctx->me_cmp;
        c->mb_cmp = s->avctx->mb_cmp;
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, s->avctx->codec, NULL);
        if (ret < 0)
            goto fail;


        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            s->tmp_frames[i + 1]->pict_type = is_p ?
            /* NOTE(review): the rest of this conditional was lost in
             * extraction (upstream: AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;). */
            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* NOTE(review): the flush call was lost in extraction (upstream:
         * out_size = encode_frame(c, NULL, pkt);). */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);

        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        /* NOTE(review): scratch-context teardown was lost in extraction
         * (presumably avcodec_free_context(&c);). */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1529 
1530 /**
1531  * Determines whether an input picture is discarded or not
1532  * and if not determines the length of the next chain of B frames
1533  * and moves these pictures (including the P frame) into
1534  * reordered_input_picture.
1535  * input_picture[0] is always NULL when exiting this function, even on error;
1536  * reordered_input_picture[0] is always NULL when exiting this function on error.
1537  */
/* NOTE(review): the signature line was lost in extraction; upstream this is
 * "static int set_bframe_chain_length(MpegEncContext *s)" (see the doc
 * comment above) — confirm against the repository. */
{
    /* Either nothing to do or can't do anything */
    if (s->reordered_input_picture[0] || !s->input_picture[0])
        return 0;

    /* set next picture type & ordering */
    if (s->frame_skip_threshold || s->frame_skip_factor) {
        if (s->picture_in_gop_number < s->gop_size &&
            s->next_pic.ptr &&
            skip_check(s, s->input_picture[0], s->next_pic.ptr)) {
            // FIXME check that the gop check above is +-1 correct
            ff_refstruct_unref(&s->input_picture[0]);

            ff_vbv_update(s, 0);

            return 0;
        }
    }

    /* No reference yet (or intra-only codec): emit an I frame directly. */
    if (/*s->picture_in_gop_number >= s->gop_size ||*/
        !s->next_pic.ptr || s->intra_only) {
        s->reordered_input_picture[0] = s->input_picture[0];
        s->input_picture[0] = NULL;
        s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
    } else {
        int b_frames = 0;

        /* In two-pass mode the picture types come from the pass-1 stats. */
        if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
            for (int i = 0; i < s->max_b_frames + 1; i++) {
                int pict_num = s->input_picture[0]->display_picture_number + i;

                if (pict_num >= s->rc_context.num_entries)
                    break;
                if (!s->input_picture[i]) {
                    s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                    break;
                }

                s->input_picture[i]->f->pict_type =
                    s->rc_context.entry[pict_num].new_pict_type;
            }
        }

        if (s->b_frame_strategy == 0) {
            /* Fixed strategy: as many B frames as are queued. */
            b_frames = s->max_b_frames;
            while (b_frames && !s->input_picture[b_frames])
                b_frames--;
        } else if (s->b_frame_strategy == 1) {
            /* Heuristic: stop the B chain at the first picture with too
             * many intra-looking macroblocks relative to its predecessor. */
            int i;
            for (i = 1; i < s->max_b_frames + 1; i++) {
                if (s->input_picture[i] &&
                    s->input_picture[i]->b_frame_score == 0) {
                    s->input_picture[i]->b_frame_score =
                        /* NOTE(review): the call head was lost in extraction
                         * (upstream: get_intra_count(s,). */
                        s->input_picture[i ]->f->data[0],
                        s->input_picture[i - 1]->f->data[0],
                        s->linesize) + 1;
                }
            }
            for (i = 0; i < s->max_b_frames + 1; i++) {
                if (!s->input_picture[i] ||
                    s->input_picture[i]->b_frame_score - 1 >
                        s->mb_num / s->b_sensitivity)
                    break;
            }

            b_frames = FFMAX(0, i - 1);

            /* reset scores */
            for (i = 0; i < b_frames + 1; i++) {
                s->input_picture[i]->b_frame_score = 0;
            }
        } else if (s->b_frame_strategy == 2) {
            b_frames = estimate_best_b_count(s);
            if (b_frames < 0) {
                ff_refstruct_unref(&s->input_picture[0]);
                return b_frames;
            }
        }

        emms_c();

        /* Truncate the chain at any picture whose type was forced non-B. */
        for (int i = b_frames - 1; i >= 0; i--) {
            int type = s->input_picture[i]->f->pict_type;
            if (type && type != AV_PICTURE_TYPE_B)
                b_frames = i;
        }
        if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
            b_frames == s->max_b_frames) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "warning, too many B-frames in a row\n");
        }

        if (s->picture_in_gop_number + b_frames >= s->gop_size) {
            if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                s->gop_size > s->picture_in_gop_number) {
                b_frames = s->gop_size - s->picture_in_gop_number - 1;
            } else {
                if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                    b_frames = 0;
                s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
            }
        }

        if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
            s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
            b_frames--;

        /* Move the P frame first, then the B frames, into the reordered
         * (coding-order) queue. */
        s->reordered_input_picture[0] = s->input_picture[b_frames];
        s->input_picture[b_frames] = NULL;
        if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
        for (int i = 0; i < b_frames; i++) {
            s->reordered_input_picture[i + 1] = s->input_picture[i];
            s->input_picture[i] = NULL;
            s->reordered_input_picture[i + 1]->f->pict_type =
                /* NOTE(review): lost in extraction (AV_PICTURE_TYPE_B;). */
            s->reordered_input_picture[i + 1]->coded_picture_number =
                s->coded_picture_number++;
        }
    }

    return 0;
}
1667 
/* Pop the next coding-order picture into s->cur_pic / s->new_pic and
 * allocate its encoder-side accessories.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "static int select_input_picture(MpegEncContext *s)" — confirm. */
{
    int ret;

    av_assert1(!s->reordered_input_picture[0]);

    /* Shift the coding-order queue down by one slot. */
    for (int i = 1; i <= MAX_B_FRAMES; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_B_FRAMES] = NULL;

    /* NOTE(review): a call was lost here in extraction (presumably
     * ret = set_bframe_chain_length(s); — ret is otherwise uninitialized). */
    av_assert1(!s->input_picture[0]);
    if (ret < 0)
        return ret;

    av_frame_unref(s->new_pic);

    if (s->reordered_input_picture[0]) {
        /* B frames are never used as references. */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_B;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable
            av_frame_move_ref(s->new_pic, s->reordered_input_picture[0]->f);

            ret = prepare_picture(s, s->reordered_input_picture[0]->f, s->new_pic);
            if (ret < 0)
                goto fail;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            ret = av_frame_ref(s->new_pic, s->reordered_input_picture[0]->f);
            if (ret < 0)
                goto fail;
            for (int i = 0; i < MPV_MAX_PLANES; i++) {
                if (s->new_pic->data[i])
                    s->new_pic->data[i] += INPLACE_OFFSET;
            }
        }
        s->cur_pic.ptr = s->reordered_input_picture[0];
        s->reordered_input_picture[0] = NULL;
        av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
        av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height);
        av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
        ret = ff_mpv_alloc_pic_accessories(s->avctx, &s->cur_pic,
                                           &s->sc, &s->buffer_pools, s->mb_height);
        if (ret < 0) {
            ff_mpv_unref_picture(&s->cur_pic);
            return ret;
        }
        s->picture_number = s->cur_pic.ptr->display_picture_number;

    }
    return 0;
fail:
    ff_refstruct_unref(&s->reordered_input_picture[0]);
    return ret;
}
1726 
/* Per-frame post-processing: pad the reconstructed picture's borders for
 * unrestricted motion vectors and remember the type/lambda of this frame.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "static void frame_end(MpegEncContext *s)" — confirm. */
{
    if (s->unrestricted_mv &&
        s->cur_pic.reference &&
        !s->intra_only) {
        int hshift = s->chroma_x_shift;
        int vshift = s->chroma_y_shift;
        s->mpvencdsp.draw_edges(s->cur_pic.data[0],
                                s->cur_pic.linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): the edge-width argument line
                                 * was lost in extraction (upstream:
                                 * EDGE_WIDTH, EDGE_WIDTH,). */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[1],
                                s->cur_pic.linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[2],
                                s->cur_pic.linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* Remember per-type state for the next frame's decisions. */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->cur_pic.ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1762 
/* Recompute the per-coefficient noise-reduction offsets from the running
 * DCT error sums, halving the accumulators when the count grows too large.
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "static void update_noise_reduction(MpegEncContext *s)" — confirm. */
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* Decay the accumulators so recent statistics dominate. */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            /* Offset proportional to noise_reduction / average error;
             * +1 in the divisor avoids division by zero. */
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1783 
/* Per-frame setup: publish the picture type and rotate the reference
 * pictures (non-B frames become the new backward reference).
 * NOTE(review): the signature line was lost in extraction; upstream this is
 * "static void frame_start(MpegEncContext *s)" — confirm. */
{
    s->cur_pic.ptr->f->pict_type = s->pict_type;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->last_pic, &s->next_pic);
        ff_mpv_replace_picture(&s->next_pic, &s->cur_pic);
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): a call was lost here in extraction (presumably
         * update_noise_reduction(s);). */
    }
}
1798 
/* Top-level per-frame entry point: queue/reorder input, encode one picture,
 * handle VBV-driven re-encode, stuffing, vbv_delay patching and packet
 * metadata. Returns 0 on success, negative on error; *got_packet tells the
 * caller whether pkt holds output.
 * NOTE(review): the first signature line and the MpegEncContext derivation
 * were lost in extraction; upstream:
 * "int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt," and
 * "MpegEncContext *s = avctx->priv_data;" — confirm. Further lost lines are
 * marked below. */
                          const AVFrame *pic_arg, int *got_packet)
{
    int stuffing_count, ret;
    int context_count = s->slice_context_count;

    ff_mpv_unref_picture(&s->cur_pic);

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_pic->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        size_t pkt_size = 10000 + s->mb_width * s->mb_height *
                                  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        /* NOTE(review): a statement was lost here in extraction. */
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 /* NOTE(review): side-data type argument lost
                                  * (upstream: AV_PKT_DATA_H263_MB_INFO,). */
                                 s->mb_width*s->mb_height*12);
            if (!s->mb_info_ptr)
                return AVERROR(ENOMEM);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        s->pict_type = s->new_pic->pict_type;
        //emms_c();
        frame_start(s);
vbv_retry:
        ret = encode_picture(s, pkt);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): a statement was lost here in extraction. */
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if the frame is too large, raise lambda and re-encode. */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;        // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    s->no_rounding ^= s->flipflop_rounding;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

            /* NOTE(review): a statement was lost here in extraction. */
        }

        /* NOTE(review): one or more lines were lost here in extraction
         * (presumably the AV_CODEC_FLAG_PASS1 stats write — confirm). */

        for (int i = 0; i < MPV_MAX_PLANES; i++)
            avctx->error[i] += s->encoding_error[i];
        ff_side_data_set_encoder_stats(pkt, s->cur_pic.ptr->f->quality,
                                       s->encoding_error,
                                       /* NOTE(review): argument line lost in
                                        * extraction. */
                                       s->pict_type);

        /* NOTE(review): the guarding condition for this assert was lost in
         * extraction. */
        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits  = put_bits_count(&s->pb);

        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* NOTE(review): the MPEG-1/2 case labels were lost in
             * extraction. */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
            break;
            case AV_CODEC_ID_MPEG4:
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
            break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits  = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            /* NOTE(review): a condition line was lost in extraction. */
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits  = avctx->rc_max_rate *
            /* NOTE(review): factor lost in extraction (presumably
             * av_q2d(avctx->time_base);). */
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_pos - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;

            if (bits < 0)
                /* NOTE(review): av_log head lost in extraction. */
                       "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* Patch the 16-bit vbv_delay field already written into the
             * picture header (it straddles three bytes). */
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1]  = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): side-data call head lost in extraction
             * (presumably av_packet_add_side_data(pkt,
             * AV_PKT_DATA_CPB_PROPERTIES,). */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits += s->frame_bits;

        pkt->pts = s->cur_pic.ptr->f->pts;
        pkt->duration = s->cur_pic.ptr->f->duration;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->cur_pic.ptr->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
        /* NOTE(review): the enclosing condition was lost in extraction. */
            ret = ff_encode_reordered_opaque(avctx, pkt, s->cur_pic.ptr->f);
            if (ret < 0)
                return ret;
        }

        if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
            /* NOTE(review): body lost (presumably pkt->flags |= AV_PKT_FLAG_KEY;). */
        if (s->mb_info)
            /* NOTE(review): body lost in extraction. */
    } else {
        s->frame_bits = 0;
    }

    ff_mpv_unref_picture(&s->cur_pic);

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2027 
/* Zero out a block whose only significant content is a sparse scattering of
 * +-1 coefficients, when the positional score of those coefficients stays
 * below threshold. A negative threshold means the DC coefficient may also
 * be eliminated.
 * NOTE(review): the first signature line was lost in extraction; upstream:
 * "static void dct_single_coeff_elimination(MpegEncContext *s," — confirm. */
                                          int n, int threshold)
{
    /* Positional weights: coefficients early in scan order count more. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* Any coefficient with |level| > 1 is significant: keep block. */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* Eliminate everything (except possibly DC). */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2083 
2084 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2085  int last_index)
2086 {
2087  int i;
2088  const int maxlevel = s->max_qcoeff;
2089  const int minlevel = s->min_qcoeff;
2090  int overflow = 0;
2091 
2092  if (s->mb_intra) {
2093  i = 1; // skip clipping of intra dc
2094  } else
2095  i = 0;
2096 
2097  for (; i <= last_index; i++) {
2098  const int j = s->intra_scantable.permutated[i];
2099  int level = block[j];
2100 
2101  if (level > maxlevel) {
2102  level = maxlevel;
2103  overflow++;
2104  } else if (level < minlevel) {
2105  level = minlevel;
2106  overflow++;
2107  }
2108 
2109  block[j] = level;
2110  }
2111 
2112  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2113  av_log(s->avctx, AV_LOG_INFO,
2114  "warning, clipping %d dct coefficients to %d..%d\n",
2115  overflow, minlevel, maxlevel);
2116 }
2117 
/* Fill weight[64] with a per-pixel activity measure for one 8x8 block:
 * for each pixel, the weight is derived from the variance of its 3x3
 * neighborhood (clamped at the block border). */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int ny = y_lo; ny < y_hi; ny++) {
                for (int nx = x_lo; nx < x_hi; nx++) {
                    const int v = ptr[nx + ny * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * E[v^2] - E[v]^2 scaled) / count */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2141 
2143  int motion_x, int motion_y,
2144  int mb_block_height,
2145  int mb_block_width,
2146  int mb_block_count,
2147  int chroma_x_shift,
2148  int chroma_y_shift,
2149  int chroma_format)
2150 {
2151 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2152  * and neither of these encoders currently supports 444. */
2153 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2154  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2155  int16_t weight[12][64];
2156  int16_t orig[12][64];
2157  const int mb_x = s->mb_x;
2158  const int mb_y = s->mb_y;
2159  int i;
2160  int skip_dct[12];
2161  int dct_offset = s->linesize * 8; // default for progressive frames
2162  int uv_dct_offset = s->uvlinesize * 8;
2163  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2164  ptrdiff_t wrap_y, wrap_c;
2165 
2166  for (i = 0; i < mb_block_count; i++)
2167  skip_dct[i] = s->skipdct;
2168 
2169  if (s->adaptive_quant) {
2170  const int last_qp = s->qscale;
2171  const int mb_xy = mb_x + mb_y * s->mb_stride;
2172 
2173  s->lambda = s->lambda_table[mb_xy];
2174  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2176 
2177  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2178  s->dquant = s->cur_pic.qscale_table[mb_xy] - last_qp;
2179 
2180  if (s->out_format == FMT_H263) {
2181  s->dquant = av_clip(s->dquant, -2, 2);
2182 
2183  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2184  if (!s->mb_intra) {
2185  if (s->pict_type == AV_PICTURE_TYPE_B) {
2186  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2187  s->dquant = 0;
2188  }
2189  if (s->mv_type == MV_TYPE_8X8)
2190  s->dquant = 0;
2191  }
2192  }
2193  }
2194  }
2195  ff_set_qscale(s, last_qp + s->dquant);
2196  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2197  ff_set_qscale(s, s->qscale + s->dquant);
2198 
2199  wrap_y = s->linesize;
2200  wrap_c = s->uvlinesize;
2201  ptr_y = s->new_pic->data[0] +
2202  (mb_y * 16 * wrap_y) + mb_x * 16;
2203  ptr_cb = s->new_pic->data[1] +
2204  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2205  ptr_cr = s->new_pic->data[2] +
2206  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2207 
2208  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2209  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2210  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2211  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2212  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2213  wrap_y, wrap_y,
2214  16, 16, mb_x * 16, mb_y * 16,
2215  s->width, s->height);
2216  ptr_y = ebuf;
2217  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2218  wrap_c, wrap_c,
2219  mb_block_width, mb_block_height,
2220  mb_x * mb_block_width, mb_y * mb_block_height,
2221  cw, ch);
2222  ptr_cb = ebuf + 16 * wrap_y;
2223  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2224  wrap_c, wrap_c,
2225  mb_block_width, mb_block_height,
2226  mb_x * mb_block_width, mb_y * mb_block_height,
2227  cw, ch);
2228  ptr_cr = ebuf + 16 * wrap_y + 16;
2229  }
2230 
2231  if (s->mb_intra) {
2232  if (INTERLACED_DCT(s)) {
2233  int progressive_score, interlaced_score;
2234 
2235  s->interlaced_dct = 0;
2236  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2237  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2238  NULL, wrap_y, 8) - 400;
2239 
2240  if (progressive_score > 0) {
2241  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2242  NULL, wrap_y * 2, 8) +
2243  s->ildct_cmp[1](s, ptr_y + wrap_y,
2244  NULL, wrap_y * 2, 8);
2245  if (progressive_score > interlaced_score) {
2246  s->interlaced_dct = 1;
2247 
2248  dct_offset = wrap_y;
2249  uv_dct_offset = wrap_c;
2250  wrap_y <<= 1;
2251  if (chroma_format == CHROMA_422 ||
2253  wrap_c <<= 1;
2254  }
2255  }
2256  }
2257 
2258  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2259  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2260  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2261  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2262 
2263  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2264  skip_dct[4] = 1;
2265  skip_dct[5] = 1;
2266  } else {
2267  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2268  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2269  if (chroma_format == CHROMA_422) {
2270  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2271  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2272  } else if (chroma_format == CHROMA_444) {
2273  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2274  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2275  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2276  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2277  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2278  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2279  }
2280  }
2281  } else {
2282  op_pixels_func (*op_pix)[4];
2283  qpel_mc_func (*op_qpix)[16];
2284  uint8_t *dest_y, *dest_cb, *dest_cr;
2285 
2286  dest_y = s->dest[0];
2287  dest_cb = s->dest[1];
2288  dest_cr = s->dest[2];
2289 
2290  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2291  op_pix = s->hdsp.put_pixels_tab;
2292  op_qpix = s->qdsp.put_qpel_pixels_tab;
2293  } else {
2294  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2295  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2296  }
2297 
2298  if (s->mv_dir & MV_DIR_FORWARD) {
2299  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2300  s->last_pic.data,
2301  op_pix, op_qpix);
2302  op_pix = s->hdsp.avg_pixels_tab;
2303  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2304  }
2305  if (s->mv_dir & MV_DIR_BACKWARD) {
2306  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2307  s->next_pic.data,
2308  op_pix, op_qpix);
2309  }
2310 
2311  if (INTERLACED_DCT(s)) {
2312  int progressive_score, interlaced_score;
2313 
2314  s->interlaced_dct = 0;
2315  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2316  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2317  ptr_y + wrap_y * 8,
2318  wrap_y, 8) - 400;
2319 
2320  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2321  progressive_score -= 400;
2322 
2323  if (progressive_score > 0) {
2324  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2325  wrap_y * 2, 8) +
2326  s->ildct_cmp[0](s, dest_y + wrap_y,
2327  ptr_y + wrap_y,
2328  wrap_y * 2, 8);
2329 
2330  if (progressive_score > interlaced_score) {
2331  s->interlaced_dct = 1;
2332 
2333  dct_offset = wrap_y;
2334  uv_dct_offset = wrap_c;
2335  wrap_y <<= 1;
2336  if (chroma_format == CHROMA_422)
2337  wrap_c <<= 1;
2338  }
2339  }
2340  }
2341 
2342  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2343  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2344  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2345  dest_y + dct_offset, wrap_y);
2346  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2347  dest_y + dct_offset + 8, wrap_y);
2348 
2349  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2350  skip_dct[4] = 1;
2351  skip_dct[5] = 1;
2352  } else {
2353  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2354  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2355  if (!chroma_y_shift) { /* 422 */
2356  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2357  dest_cb + uv_dct_offset, wrap_c);
2358  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2359  dest_cr + uv_dct_offset, wrap_c);
2360  }
2361  }
2362  /* pre quantization */
2363  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2364  // FIXME optimize
2365  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2366  skip_dct[0] = 1;
2367  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2368  skip_dct[1] = 1;
2369  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2370  wrap_y, 8) < 20 * s->qscale)
2371  skip_dct[2] = 1;
2372  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2373  wrap_y, 8) < 20 * s->qscale)
2374  skip_dct[3] = 1;
2375  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2376  skip_dct[4] = 1;
2377  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2378  skip_dct[5] = 1;
2379  if (!chroma_y_shift) { /* 422 */
2380  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2381  dest_cb + uv_dct_offset,
2382  wrap_c, 8) < 20 * s->qscale)
2383  skip_dct[6] = 1;
2384  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2385  dest_cr + uv_dct_offset,
2386  wrap_c, 8) < 20 * s->qscale)
2387  skip_dct[7] = 1;
2388  }
2389  }
2390  }
2391 
2392  if (s->quantizer_noise_shaping) {
2393  if (!skip_dct[0])
2394  get_visual_weight(weight[0], ptr_y , wrap_y);
2395  if (!skip_dct[1])
2396  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2397  if (!skip_dct[2])
2398  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2399  if (!skip_dct[3])
2400  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2401  if (!skip_dct[4])
2402  get_visual_weight(weight[4], ptr_cb , wrap_c);
2403  if (!skip_dct[5])
2404  get_visual_weight(weight[5], ptr_cr , wrap_c);
2405  if (!chroma_y_shift) { /* 422 */
2406  if (!skip_dct[6])
2407  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2408  wrap_c);
2409  if (!skip_dct[7])
2410  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2411  wrap_c);
2412  }
2413  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2414  }
2415 
2416  /* DCT & quantize */
2417  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2418  {
2419  for (i = 0; i < mb_block_count; i++) {
2420  if (!skip_dct[i]) {
2421  int overflow;
2422  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2423  // FIXME we could decide to change to quantizer instead of
2424  // clipping
2425  // JS: I don't think that would be a good idea it could lower
2426  // quality instead of improve it. Just INTRADC clipping
2427  // deserves changes in quantizer
2428  if (overflow)
2429  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2430  } else
2431  s->block_last_index[i] = -1;
2432  }
2433  if (s->quantizer_noise_shaping) {
2434  for (i = 0; i < mb_block_count; i++) {
2435  if (!skip_dct[i]) {
2436  s->block_last_index[i] =
2437  dct_quantize_refine(s, s->block[i], weight[i],
2438  orig[i], i, s->qscale);
2439  }
2440  }
2441  }
2442 
2443  if (s->luma_elim_threshold && !s->mb_intra)
2444  for (i = 0; i < 4; i++)
2445  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2446  if (s->chroma_elim_threshold && !s->mb_intra)
2447  for (i = 4; i < mb_block_count; i++)
2448  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2449 
2450  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2451  for (i = 0; i < mb_block_count; i++) {
2452  if (s->block_last_index[i] == -1)
2453  s->coded_score[i] = INT_MAX / 256;
2454  }
2455  }
2456  }
2457 
2458  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2459  s->block_last_index[4] =
2460  s->block_last_index[5] = 0;
2461  s->block[4][0] =
2462  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2463  if (!chroma_y_shift) { /* 422 / 444 */
2464  for (i=6; i<12; i++) {
2465  s->block_last_index[i] = 0;
2466  s->block[i][0] = s->block[4][0];
2467  }
2468  }
2469  }
2470 
2471  // non c quantize code returns incorrect block_last_index FIXME
2472  if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
2473  for (i = 0; i < mb_block_count; i++) {
2474  int j;
2475  if (s->block_last_index[i] > 0) {
2476  for (j = 63; j > 0; j--) {
2477  if (s->block[i][s->intra_scantable.permutated[j]])
2478  break;
2479  }
2480  s->block_last_index[i] = j;
2481  }
2482  }
2483  }
2484 
2485  /* huffman encode */
2486  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2489  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2490  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2491  break;
2492  case AV_CODEC_ID_MPEG4:
2493  if (CONFIG_MPEG4_ENCODER)
2494  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2495  break;
2496  case AV_CODEC_ID_MSMPEG4V2:
2497  case AV_CODEC_ID_MSMPEG4V3:
2498  case AV_CODEC_ID_WMV1:
2499  if (CONFIG_MSMPEG4ENC)
2500  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2501  break;
2502  case AV_CODEC_ID_WMV2:
2503  if (CONFIG_WMV2_ENCODER)
2504  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2505  break;
2506  case AV_CODEC_ID_H261:
2507  if (CONFIG_H261_ENCODER)
2508  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2509  break;
2510  case AV_CODEC_ID_H263:
2511  case AV_CODEC_ID_H263P:
2512  case AV_CODEC_ID_FLV1:
2513  case AV_CODEC_ID_RV10:
2514  case AV_CODEC_ID_RV20:
2515  if (CONFIG_H263_ENCODER)
2516  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2517  break;
2518 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2519  case AV_CODEC_ID_MJPEG:
2520  case AV_CODEC_ID_AMV:
2521  ff_mjpeg_encode_mb(s, s->block);
2522  break;
2523 #endif
2524  case AV_CODEC_ID_SPEEDHQ:
2525  if (CONFIG_SPEEDHQ_ENCODER)
2526  ff_speedhq_encode_mb(s, s->block);
2527  break;
2528  default:
2529  av_assert1(0);
2530  }
2531 }
2532 
2533 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2534 {
2535  if (s->chroma_format == CHROMA_420)
2536  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2537  else if (s->chroma_format == CHROMA_422)
2538  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2539  else
2540  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2541 }
2542 
2544  const MpegEncContext *s)
2545 {
2546  int i;
2547 
2548  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2549 
2550  /* MPEG-1 */
2551  d->mb_skip_run= s->mb_skip_run;
2552  for(i=0; i<3; i++)
2553  d->last_dc[i] = s->last_dc[i];
2554 
2555  /* statistics */
2556  d->mv_bits= s->mv_bits;
2557  d->i_tex_bits= s->i_tex_bits;
2558  d->p_tex_bits= s->p_tex_bits;
2559  d->i_count= s->i_count;
2560  d->misc_bits= s->misc_bits;
2561  d->last_bits= 0;
2562 
2563  d->mb_skipped= 0;
2564  d->qscale= s->qscale;
2565  d->dquant= s->dquant;
2566 
2567  d->esc3_level_length= s->esc3_level_length;
2568 }
2569 
2571  const MpegEncContext *s)
2572 {
2573  int i;
2574 
2575  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2576  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2577 
2578  /* MPEG-1 */
2579  d->mb_skip_run= s->mb_skip_run;
2580  for(i=0; i<3; i++)
2581  d->last_dc[i] = s->last_dc[i];
2582 
2583  /* statistics */
2584  d->mv_bits= s->mv_bits;
2585  d->i_tex_bits= s->i_tex_bits;
2586  d->p_tex_bits= s->p_tex_bits;
2587  d->i_count= s->i_count;
2588  d->misc_bits= s->misc_bits;
2589 
2590  d->mb_intra= s->mb_intra;
2591  d->mb_skipped= s->mb_skipped;
2592  d->mv_type= s->mv_type;
2593  d->mv_dir= s->mv_dir;
2594  d->pb= s->pb;
2595  if(s->data_partitioning){
2596  d->pb2= s->pb2;
2597  d->tex_pb= s->tex_pb;
2598  }
2599  d->block= s->block;
2600  for(i=0; i<8; i++)
2601  d->block_last_index[i]= s->block_last_index[i];
2602  d->interlaced_dct= s->interlaced_dct;
2603  d->qscale= s->qscale;
2604 
2605  d->esc3_level_length= s->esc3_level_length;
2606 }
2607 
2610  int *dmin, int *next_block, int motion_x, int motion_y)
2611 {
2612  int score;
2613  uint8_t *dest_backup[3];
2614 
2615  copy_context_before_encode(s, backup);
2616 
2617  s->block= s->blocks[*next_block];
2618  s->pb= pb[*next_block];
2619  if(s->data_partitioning){
2620  s->pb2 = pb2 [*next_block];
2621  s->tex_pb= tex_pb[*next_block];
2622  }
2623 
2624  if(*next_block){
2625  memcpy(dest_backup, s->dest, sizeof(s->dest));
2626  s->dest[0] = s->sc.rd_scratchpad;
2627  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2628  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2629  av_assert0(s->linesize >= 32); //FIXME
2630  }
2631 
2632  encode_mb(s, motion_x, motion_y);
2633 
2634  score= put_bits_count(&s->pb);
2635  if(s->data_partitioning){
2636  score+= put_bits_count(&s->pb2);
2637  score+= put_bits_count(&s->tex_pb);
2638  }
2639 
2640  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2641  mpv_reconstruct_mb(s, s->block);
2642 
2643  score *= s->lambda2;
2644  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2645  }
2646 
2647  if(*next_block){
2648  memcpy(s->dest, dest_backup, sizeof(s->dest));
2649  }
2650 
2651  if(score<*dmin){
2652  *dmin= score;
2653  *next_block^=1;
2654 
2656  }
2657 }
2658 
2659 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2660  const uint32_t *sq = ff_square_tab + 256;
2661  int acc=0;
2662  int x,y;
2663 
2664  if(w==16 && h==16)
2665  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2666  else if(w==8 && h==8)
2667  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2668 
2669  for(y=0; y<h; y++){
2670  for(x=0; x<w; x++){
2671  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2672  }
2673  }
2674 
2675  av_assert2(acc>=0);
2676 
2677  return acc;
2678 }
2679 
2680 static int sse_mb(MpegEncContext *s){
2681  int w= 16;
2682  int h= 16;
2683  int chroma_mb_w = w >> s->chroma_x_shift;
2684  int chroma_mb_h = h >> s->chroma_y_shift;
2685 
2686  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2687  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2688 
2689  if(w==16 && h==16)
2690  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2691  s->dest[0], s->linesize, 16) +
2692  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2693  s->dest[1], s->uvlinesize, chroma_mb_h) +
2694  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2695  s->dest[2], s->uvlinesize, chroma_mb_h);
2696  else
2697  return sse(s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2698  s->dest[0], w, h, s->linesize) +
2699  sse(s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2700  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2701  sse(s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2702  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2703 }
2704 
2706  MpegEncContext *s= *(void**)arg;
2707 
2708 
2709  s->me.pre_pass=1;
2710  s->me.dia_size= s->avctx->pre_dia_size;
2711  s->first_slice_line=1;
2712  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2713  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2714  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2715  }
2716  s->first_slice_line=0;
2717  }
2718 
2719  s->me.pre_pass=0;
2720 
2721  return 0;
2722 }
2723 
2725  MpegEncContext *s= *(void**)arg;
2726 
2727  s->me.dia_size= s->avctx->dia_size;
2728  s->first_slice_line=1;
2729  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2730  s->mb_x=0; //for block init below
2732  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2733  s->block_index[0]+=2;
2734  s->block_index[1]+=2;
2735  s->block_index[2]+=2;
2736  s->block_index[3]+=2;
2737 
2738  /* compute motion vector & mb_type and store in context */
2739  if(s->pict_type==AV_PICTURE_TYPE_B)
2740  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2741  else
2742  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2743  }
2744  s->first_slice_line=0;
2745  }
2746  return 0;
2747 }
2748 
2749 static int mb_var_thread(AVCodecContext *c, void *arg){
2750  MpegEncContext *s= *(void**)arg;
2751  int mb_x, mb_y;
2752 
2753  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2754  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2755  int xx = mb_x * 16;
2756  int yy = mb_y * 16;
2757  const uint8_t *pix = s->new_pic->data[0] + (yy * s->linesize) + xx;
2758  int varc;
2759  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2760 
2761  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2762  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2763 
2764  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2765  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2766  s->me.mb_var_sum_temp += varc;
2767  }
2768  }
2769  return 0;
2770 }
2771 
2773  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2774  if(s->partitioned_frame){
2776  }
2777 
2778  ff_mpeg4_stuffing(&s->pb);
2779  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2780  s->out_format == FMT_MJPEG) {
2782  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2784  }
2785 
2786  flush_put_bits(&s->pb);
2787 
2788  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2789  s->misc_bits+= get_bits_diff(s);
2790 }
2791 
2793 {
2794  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2795  int offset = put_bits_count(&s->pb);
2796  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2797  int gobn = s->mb_y / s->gob_index;
2798  int pred_x, pred_y;
2799  if (CONFIG_H263_ENCODER)
2800  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2801  bytestream_put_le32(&ptr, offset);
2802  bytestream_put_byte(&ptr, s->qscale);
2803  bytestream_put_byte(&ptr, gobn);
2804  bytestream_put_le16(&ptr, mba);
2805  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2806  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2807  /* 4MV not implemented */
2808  bytestream_put_byte(&ptr, 0); /* hmv2 */
2809  bytestream_put_byte(&ptr, 0); /* vmv2 */
2810 }
2811 
/**
 * Track macroblock positions for packetization side data.
 * Roughly every s->mb_info bytes of output a new 12-byte info slot is
 * reserved and filled via write_mb_info() (H.263 MB boundary info —
 * presumably for RFC 2190 style payload splitting; confirm with callers).
 * No-op when the mb_info feature is disabled.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* Enough new bytes since the last recorded slot? Reserve another one. */
    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bytes_count(&s->pb, 0);
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in that
         * case, this function will be called again (with startcode == 0)
         * right after the start code has been written, and the mb info will
         * be written then. */
        return;
    }

    s->last_mb_info = put_bytes_count(&s->pb, 0);
    /* Make sure at least one slot exists before writing into it. */
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2834 
/**
 * Grow the shared output bitstream buffer when it is close to full.
 * Only reallocates when fewer than `threshold` bytes remain, there is a
 * single slice context (other slices would hold stale pointers into the
 * old buffer), and the PutBitContext really writes into the AVCodecContext
 * internal byte buffer. The PutBitContext and the last-GOB pointer are
 * rebased onto the new buffer.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if even after (not) growing fewer than
 *         `threshold` bytes remain.
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (put_bytes_left(&s->pb, 0) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* Remember ptr_lastgob as an offset so it can be rebased below. */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        /* Refuse to grow near INT_MAX: sizes are stored in ints and
         * bit positions in the PutBitContext would overflow. */
        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        /* Copy the already-written data, then swap the buffers. */
        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
    }
    if (put_bytes_left(&s->pb, 0) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2868 
2869 static int encode_thread(AVCodecContext *c, void *arg){
2870  MpegEncContext *s= *(void**)arg;
2871  int mb_x, mb_y, mb_y_order;
2872  int chr_h= 16>>s->chroma_y_shift;
2873  int i, j;
2874  MpegEncContext best_s = { 0 }, backup_s;
2875  uint8_t bit_buf[2][MAX_MB_BYTES];
2876  uint8_t bit_buf2[2][MAX_MB_BYTES];
2877  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2878  PutBitContext pb[2], pb2[2], tex_pb[2];
2879 
2880  for(i=0; i<2; i++){
2881  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2882  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2883  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2884  }
2885 
2886  s->last_bits= put_bits_count(&s->pb);
2887  s->mv_bits=0;
2888  s->misc_bits=0;
2889  s->i_tex_bits=0;
2890  s->p_tex_bits=0;
2891  s->i_count=0;
2892 
2893  for(i=0; i<3; i++){
2894  /* init last dc values */
2895  /* note: quant matrix value (8) is implied here */
2896  s->last_dc[i] = 128 << s->intra_dc_precision;
2897 
2898  s->encoding_error[i] = 0;
2899  }
2900  if(s->codec_id==AV_CODEC_ID_AMV){
2901  s->last_dc[0] = 128*8/13;
2902  s->last_dc[1] = 128*8/14;
2903  s->last_dc[2] = 128*8/14;
2904  }
2905  s->mb_skip_run = 0;
2906  memset(s->last_mv, 0, sizeof(s->last_mv));
2907 
2908  s->last_mv_dir = 0;
2909 
2910  switch(s->codec_id){
2911  case AV_CODEC_ID_H263:
2912  case AV_CODEC_ID_H263P:
2913  case AV_CODEC_ID_FLV1:
2914  if (CONFIG_H263_ENCODER)
2915  s->gob_index = H263_GOB_HEIGHT(s->height);
2916  break;
2917  case AV_CODEC_ID_MPEG4:
2918  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2920  break;
2921  }
2922 
2923  s->resync_mb_x=0;
2924  s->resync_mb_y=0;
2925  s->first_slice_line = 1;
2926  s->ptr_lastgob = s->pb.buf;
2927  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2928  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2929  int first_in_slice;
2930  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2931  if (first_in_slice && mb_y_order != s->start_mb_y)
2933  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2934  } else {
2935  mb_y = mb_y_order;
2936  }
2937  s->mb_x=0;
2938  s->mb_y= mb_y;
2939 
2940  ff_set_qscale(s, s->qscale);
2942 
2943  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2944  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2945  int mb_type= s->mb_type[xy];
2946 // int d;
2947  int dmin= INT_MAX;
2948  int dir;
2949  int size_increase = s->avctx->internal->byte_buffer_size/4
2950  + s->mb_width*MAX_MB_BYTES;
2951 
2953  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2954  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2955  return -1;
2956  }
2957  if(s->data_partitioning){
2958  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2959  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2960  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2961  return -1;
2962  }
2963  }
2964 
2965  s->mb_x = mb_x;
2966  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2967  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2968 
2969  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2971  xy= s->mb_y*s->mb_stride + s->mb_x;
2972  mb_type= s->mb_type[xy];
2973  }
2974 
2975  /* write gob / video packet header */
2976  if(s->rtp_mode){
2977  int current_packet_size, is_gob_start;
2978 
2979  current_packet_size = put_bytes_count(&s->pb, 1)
2980  - (s->ptr_lastgob - s->pb.buf);
2981 
2982  is_gob_start = s->rtp_payload_size &&
2983  current_packet_size >= s->rtp_payload_size &&
2984  mb_y + mb_x > 0;
2985 
2986  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2987 
2988  switch(s->codec_id){
2989  case AV_CODEC_ID_H263:
2990  case AV_CODEC_ID_H263P:
2991  if(!s->h263_slice_structured)
2992  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2993  break;
2995  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2997  if(s->mb_skip_run) is_gob_start=0;
2998  break;
2999  case AV_CODEC_ID_MJPEG:
3000  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3001  break;
3002  }
3003 
3004  if(is_gob_start){
3005  if(s->start_mb_y != mb_y || mb_x!=0){
3006  write_slice_end(s);
3007 
3008  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3010  }
3011  }
3012 
3013  av_assert2((put_bits_count(&s->pb)&7) == 0);
3014  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3015 
3016  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3017  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3018  int d = 100 / s->error_rate;
3019  if(r % d == 0){
3020  current_packet_size=0;
3021  s->pb.buf_ptr= s->ptr_lastgob;
3022  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3023  }
3024  }
3025 
3026  switch(s->codec_id){
3027  case AV_CODEC_ID_MPEG4:
3028  if (CONFIG_MPEG4_ENCODER) {
3031  }
3032  break;
3035  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3038  }
3039  break;
3040  case AV_CODEC_ID_H263:
3041  case AV_CODEC_ID_H263P:
3042  if (CONFIG_H263_ENCODER) {
3043  update_mb_info(s, 1);
3045  }
3046  break;
3047  }
3048 
3049  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3050  int bits= put_bits_count(&s->pb);
3051  s->misc_bits+= bits - s->last_bits;
3052  s->last_bits= bits;
3053  }
3054 
3055  s->ptr_lastgob += current_packet_size;
3056  s->first_slice_line=1;
3057  s->resync_mb_x=mb_x;
3058  s->resync_mb_y=mb_y;
3059  }
3060  }
3061 
3062  if( (s->resync_mb_x == s->mb_x)
3063  && s->resync_mb_y+1 == s->mb_y){
3064  s->first_slice_line=0;
3065  }
3066 
3067  s->mb_skipped=0;
3068  s->dquant=0; //only for QP_RD
3069 
3070  update_mb_info(s, 0);
3071 
3072  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3073  int next_block=0;
3074  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3075 
3076  copy_context_before_encode(&backup_s, s);
3077  backup_s.pb= s->pb;
3078  best_s.data_partitioning= s->data_partitioning;
3079  best_s.partitioned_frame= s->partitioned_frame;
3080  if(s->data_partitioning){
3081  backup_s.pb2= s->pb2;
3082  backup_s.tex_pb= s->tex_pb;
3083  }
3084 
3086  s->mv_dir = MV_DIR_FORWARD;
3087  s->mv_type = MV_TYPE_16X16;
3088  s->mb_intra= 0;
3089  s->mv[0][0][0] = s->p_mv_table[xy][0];
3090  s->mv[0][0][1] = s->p_mv_table[xy][1];
3091  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3092  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3093  }
3095  s->mv_dir = MV_DIR_FORWARD;
3096  s->mv_type = MV_TYPE_FIELD;
3097  s->mb_intra= 0;
3098  for(i=0; i<2; i++){
3099  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3100  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3101  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3102  }
3103  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3104  &dmin, &next_block, 0, 0);
3105  }
3107  s->mv_dir = MV_DIR_FORWARD;
3108  s->mv_type = MV_TYPE_16X16;
3109  s->mb_intra= 0;
3110  s->mv[0][0][0] = 0;
3111  s->mv[0][0][1] = 0;
3112  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3113  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3114  }
3116  s->mv_dir = MV_DIR_FORWARD;
3117  s->mv_type = MV_TYPE_8X8;
3118  s->mb_intra= 0;
3119  for(i=0; i<4; i++){
3120  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3121  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3122  }
3123  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3124  &dmin, &next_block, 0, 0);
3125  }
3127  s->mv_dir = MV_DIR_FORWARD;
3128  s->mv_type = MV_TYPE_16X16;
3129  s->mb_intra= 0;
3130  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3131  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3132  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3133  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3134  }
3136  s->mv_dir = MV_DIR_BACKWARD;
3137  s->mv_type = MV_TYPE_16X16;
3138  s->mb_intra= 0;
3139  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3140  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3141  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3142  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3143  }
3145  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3146  s->mv_type = MV_TYPE_16X16;
3147  s->mb_intra= 0;
3148  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3149  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3150  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3151  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3152  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3153  &dmin, &next_block, 0, 0);
3154  }
3156  s->mv_dir = MV_DIR_FORWARD;
3157  s->mv_type = MV_TYPE_FIELD;
3158  s->mb_intra= 0;
3159  for(i=0; i<2; i++){
3160  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3161  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3162  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3163  }
3164  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3165  &dmin, &next_block, 0, 0);
3166  }
3168  s->mv_dir = MV_DIR_BACKWARD;
3169  s->mv_type = MV_TYPE_FIELD;
3170  s->mb_intra= 0;
3171  for(i=0; i<2; i++){
3172  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3173  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3174  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3175  }
3176  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3177  &dmin, &next_block, 0, 0);
3178  }
3180  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3181  s->mv_type = MV_TYPE_FIELD;
3182  s->mb_intra= 0;
3183  for(dir=0; dir<2; dir++){
3184  for(i=0; i<2; i++){
3185  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3186  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3187  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3188  }
3189  }
3190  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3191  &dmin, &next_block, 0, 0);
3192  }
3194  s->mv_dir = 0;
3195  s->mv_type = MV_TYPE_16X16;
3196  s->mb_intra= 1;
3197  s->mv[0][0][0] = 0;
3198  s->mv[0][0][1] = 0;
3199  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3200  &dmin, &next_block, 0, 0);
3201  if(s->h263_pred || s->h263_aic){
3202  if(best_s.mb_intra)
3203  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3204  else
3205  ff_clean_intra_table_entries(s); //old mode?
3206  }
3207  }
3208 
3209  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3210  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3211  const int last_qp= backup_s.qscale;
3212  int qpi, qp, dc[6];
3213  int16_t ac[6][16];
3214  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3215  static const int dquant_tab[4]={-1,1,-2,2};
3216  int storecoefs = s->mb_intra && s->dc_val[0];
3217 
3218  av_assert2(backup_s.dquant == 0);
3219 
3220  //FIXME intra
3221  s->mv_dir= best_s.mv_dir;
3222  s->mv_type = MV_TYPE_16X16;
3223  s->mb_intra= best_s.mb_intra;
3224  s->mv[0][0][0] = best_s.mv[0][0][0];
3225  s->mv[0][0][1] = best_s.mv[0][0][1];
3226  s->mv[1][0][0] = best_s.mv[1][0][0];
3227  s->mv[1][0][1] = best_s.mv[1][0][1];
3228 
3229  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3230  for(; qpi<4; qpi++){
3231  int dquant= dquant_tab[qpi];
3232  qp= last_qp + dquant;
3233  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3234  continue;
3235  backup_s.dquant= dquant;
3236  if(storecoefs){
3237  for(i=0; i<6; i++){
3238  dc[i]= s->dc_val[0][ s->block_index[i] ];
3239  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3240  }
3241  }
3242 
3243  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3244  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3245  if(best_s.qscale != qp){
3246  if(storecoefs){
3247  for(i=0; i<6; i++){
3248  s->dc_val[0][ s->block_index[i] ]= dc[i];
3249  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3250  }
3251  }
3252  }
3253  }
3254  }
3255  }
3256  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3257  int mx= s->b_direct_mv_table[xy][0];
3258  int my= s->b_direct_mv_table[xy][1];
3259 
3260  backup_s.dquant = 0;
3261  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3262  s->mb_intra= 0;
3264  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3265  &dmin, &next_block, mx, my);
3266  }
3267  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3268  backup_s.dquant = 0;
3269  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3270  s->mb_intra= 0;
3271  ff_mpeg4_set_direct_mv(s, 0, 0);
3272  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3273  &dmin, &next_block, 0, 0);
3274  }
3275  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3276  int coded=0;
3277  for(i=0; i<6; i++)
3278  coded |= s->block_last_index[i];
3279  if(coded){
3280  int mx,my;
3281  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3282  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3283  mx=my=0; //FIXME find the one we actually used
3285  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3286  mx= s->mv[1][0][0];
3287  my= s->mv[1][0][1];
3288  }else{
3289  mx= s->mv[0][0][0];
3290  my= s->mv[0][0][1];
3291  }
3292 
3293  s->mv_dir= best_s.mv_dir;
3294  s->mv_type = best_s.mv_type;
3295  s->mb_intra= 0;
3296 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3297  s->mv[0][0][1] = best_s.mv[0][0][1];
3298  s->mv[1][0][0] = best_s.mv[1][0][0];
3299  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3300  backup_s.dquant= 0;
3301  s->skipdct=1;
3302  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3303  &dmin, &next_block, mx, my);
3304  s->skipdct=0;
3305  }
3306  }
3307 
3308  s->cur_pic.qscale_table[xy] = best_s.qscale;
3309 
3310  copy_context_after_encode(s, &best_s);
3311 
3312  pb_bits_count= put_bits_count(&s->pb);
3313  flush_put_bits(&s->pb);
3314  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3315  s->pb= backup_s.pb;
3316 
3317  if(s->data_partitioning){
3318  pb2_bits_count= put_bits_count(&s->pb2);
3319  flush_put_bits(&s->pb2);
3320  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3321  s->pb2= backup_s.pb2;
3322 
3323  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3324  flush_put_bits(&s->tex_pb);
3325  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3326  s->tex_pb= backup_s.tex_pb;
3327  }
3328  s->last_bits= put_bits_count(&s->pb);
3329 
3330  if (CONFIG_H263_ENCODER &&
3331  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3333 
3334  if(next_block==0){ //FIXME 16 vs linesize16
3335  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3336  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3337  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3338  }
3339 
3340  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3341  mpv_reconstruct_mb(s, s->block);
3342  } else {
3343  int motion_x = 0, motion_y = 0;
3344  s->mv_type=MV_TYPE_16X16;
3345  // only one MB-Type possible
3346 
3347  switch(mb_type){
3349  s->mv_dir = 0;
3350  s->mb_intra= 1;
3351  motion_x= s->mv[0][0][0] = 0;
3352  motion_y= s->mv[0][0][1] = 0;
3353  break;
3355  s->mv_dir = MV_DIR_FORWARD;
3356  s->mb_intra= 0;
3357  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3358  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3359  break;
3361  s->mv_dir = MV_DIR_FORWARD;
3362  s->mv_type = MV_TYPE_FIELD;
3363  s->mb_intra= 0;
3364  for(i=0; i<2; i++){
3365  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3366  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3367  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3368  }
3369  break;
3371  s->mv_dir = MV_DIR_FORWARD;
3372  s->mv_type = MV_TYPE_8X8;
3373  s->mb_intra= 0;
3374  for(i=0; i<4; i++){
3375  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3376  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3377  }
3378  break;
3380  if (CONFIG_MPEG4_ENCODER) {
3382  s->mb_intra= 0;
3383  motion_x=s->b_direct_mv_table[xy][0];
3384  motion_y=s->b_direct_mv_table[xy][1];
3385  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3386  }
3387  break;
3389  if (CONFIG_MPEG4_ENCODER) {
3391  s->mb_intra= 0;
3392  ff_mpeg4_set_direct_mv(s, 0, 0);
3393  }
3394  break;
3396  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3397  s->mb_intra= 0;
3398  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3399  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3400  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3401  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3402  break;
3404  s->mv_dir = MV_DIR_BACKWARD;
3405  s->mb_intra= 0;
3406  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3407  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3408  break;
3410  s->mv_dir = MV_DIR_FORWARD;
3411  s->mb_intra= 0;
3412  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3413  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3414  break;
3416  s->mv_dir = MV_DIR_FORWARD;
3417  s->mv_type = MV_TYPE_FIELD;
3418  s->mb_intra= 0;
3419  for(i=0; i<2; i++){
3420  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3421  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3422  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3423  }
3424  break;
3426  s->mv_dir = MV_DIR_BACKWARD;
3427  s->mv_type = MV_TYPE_FIELD;
3428  s->mb_intra= 0;
3429  for(i=0; i<2; i++){
3430  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3431  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3432  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3433  }
3434  break;
3436  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3437  s->mv_type = MV_TYPE_FIELD;
3438  s->mb_intra= 0;
3439  for(dir=0; dir<2; dir++){
3440  for(i=0; i<2; i++){
3441  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3442  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3443  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3444  }
3445  }
3446  break;
3447  default:
3448  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3449  }
3450 
3451  encode_mb(s, motion_x, motion_y);
3452 
3453  // RAL: Update last macroblock type
3454  s->last_mv_dir = s->mv_dir;
3455 
3456  if (CONFIG_H263_ENCODER &&
3457  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3459 
3460  mpv_reconstruct_mb(s, s->block);
3461  }
3462 
3463  /* clean the MV table in IPS frames for direct mode in B-frames */
3464  if(s->mb_intra /* && I,P,S_TYPE */){
3465  s->p_mv_table[xy][0]=0;
3466  s->p_mv_table[xy][1]=0;
3467  }
3468 
3469  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3470  int w= 16;
3471  int h= 16;
3472 
3473  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3474  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3475 
3476  s->encoding_error[0] += sse(
3477  s, s->new_pic->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3478  s->dest[0], w, h, s->linesize);
3479  s->encoding_error[1] += sse(
3480  s, s->new_pic->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3481  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3482  s->encoding_error[2] += sse(
3483  s, s->new_pic->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3484  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3485  }
3486  if(s->loop_filter){
3487  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3489  }
3490  ff_dlog(s->avctx, "MB %d %d bits\n",
3491  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3492  }
3493  }
3494 
3495 #if CONFIG_MSMPEG4ENC
3496  //not beautiful here but we must write it before flushing so it has to be here
3497  if (s->msmpeg4_version != MSMP4_UNUSED && s->msmpeg4_version < MSMP4_WMV1 &&
3498  s->pict_type == AV_PICTURE_TYPE_I)
3500 #endif
3501 
3502  write_slice_end(s);
3503 
3504  return 0;
3505 }
3506 
3507 #define MERGE(field) dst->field += src->field; src->field=0
3509  MERGE(me.scene_change_score);
3510  MERGE(me.mc_mb_var_sum_temp);
3511  MERGE(me.mb_var_sum_temp);
3512 }
3513 
3515  int i;
3516 
3517  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3518  MERGE(dct_count[1]);
3519  MERGE(mv_bits);
3520  MERGE(i_tex_bits);
3521  MERGE(p_tex_bits);
3522  MERGE(i_count);
3523  MERGE(misc_bits);
3524  MERGE(encoding_error[0]);
3525  MERGE(encoding_error[1]);
3526  MERGE(encoding_error[2]);
3527 
3528  if (dst->noise_reduction){
3529  for(i=0; i<64; i++){
3530  MERGE(dct_error_sum[0][i]);
3531  MERGE(dct_error_sum[1][i]);
3532  }
3533  }
3534 
3535  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3536  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3537  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3538  flush_put_bits(&dst->pb);
3539 }
3540 
/**
 * Choose the quantizer (lambda / qscale) for the current picture.
 *
 * Priority: a pending next_lambda (one-shot override) wins; otherwise,
 * unless fixed_qscale is set, ask the rate controller for an estimate.
 * With adaptive quantization a per-MB qscale table is initialized and
 * codec-specific cleanup may run; otherwise lambda comes straight from
 * the picture quality.
 *
 * @param dry_run if nonzero, do not consume state (next_lambda is kept)
 * @return 0 on success, -1 if the rate controller rejects the frame
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* a lambda was queued by an earlier decision; use it exactly once */
        s->cur_pic.ptr->f->quality = s->next_lambda;
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        /* let the rate controller pick a quality; negative means failure */
        int quality = ff_rate_estimate_qscale(s, dry_run);
        s->cur_pic.ptr->f->quality = quality;
        if (s->cur_pic.ptr->f->quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        /* build the per-macroblock qscale table before any codec fixup */
        init_qscale_tab(s);

        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            /* NOTE(review): the statement guarded by this if appears to be
             * missing here (extraction artifact?) — confirm against the
             * upstream file; as written the guard applies to the break. */
            if (CONFIG_MPEG4_ENCODER)
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            /* NOTE(review): same apparent gap as above — the guarded call
             * seems to have been dropped; verify before relying on this. */
            if (CONFIG_H263_ENCODER)
            break;
        }

        /* with adaptive quant, lambda tracks the first table entry */
        s->lambda= s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->cur_pic.ptr->f->quality;
    update_qscale(s);
    return 0;
}
3575 
3576 /* must be called before writing the header */
3578  av_assert1(s->cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3579  s->time = s->cur_pic.ptr->f->pts * s->avctx->time_base.num;
3580 
3581  if(s->pict_type==AV_PICTURE_TYPE_B){
3582  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3583  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3584  }else{
3585  s->pp_time= s->time - s->last_non_b_time;
3586  s->last_non_b_time= s->time;
3587  av_assert1(s->picture_number==0 || s->pp_time > 0);
3588  }
3589 }
3590 
3592 {
3593  int i, ret;
3594  int bits;
3595  int context_count = s->slice_context_count;
3596 
3597  /* Reset the average MB variance */
3598  s->me.mb_var_sum_temp =
3599  s->me.mc_mb_var_sum_temp = 0;
3600 
3601  /* we need to initialize some time vars before we can encode B-frames */
3602  // RAL: Condition added for MPEG1VIDEO
3603  if (s->out_format == FMT_MPEG1 || (s->h263_pred && s->msmpeg4_version == MSMP4_UNUSED))
3605  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3607 
3608  s->me.scene_change_score=0;
3609 
3610 // s->lambda= s->cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3611 
3612  if(s->pict_type==AV_PICTURE_TYPE_I){
3613  s->no_rounding = s->msmpeg4_version >= MSMP4_V3;
3614  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3615  s->no_rounding ^= s->flipflop_rounding;
3616  }
3617 
3618  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3619  if (estimate_qp(s,1) < 0)
3620  return -1;
3622  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3623  if(s->pict_type==AV_PICTURE_TYPE_B)
3624  s->lambda= s->last_lambda_for[s->pict_type];
3625  else
3626  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3627  update_qscale(s);
3628  }
3629 
3630  if (s->out_format != FMT_MJPEG) {
3631  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3632  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3633  s->q_chroma_intra_matrix = s->q_intra_matrix;
3634  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3635  }
3636 
3637  ff_me_init_pic(s);
3638 
3639  s->mb_intra=0; //for the rate distortion & bit compare functions
3640  for (int i = 0; i < context_count; i++) {
3641  MpegEncContext *const slice = s->thread_context[i];
3642  uint8_t *start, *end;
3643  int h;
3644 
3645  if (i) {
3646  ret = ff_update_duplicate_context(slice, s);
3647  if (ret < 0)
3648  return ret;
3649  }
3650  slice->me.temp = slice->me.scratchpad = slice->sc.scratchpad_buf;
3651 
3652  h = s->mb_height;
3653  start = pkt->data + (size_t)(((int64_t) pkt->size) * slice->start_mb_y / h);
3654  end = pkt->data + (size_t)(((int64_t) pkt->size) * slice-> end_mb_y / h);
3655 
3656  init_put_bits(&s->thread_context[i]->pb, start, end - start);
3657  }
3658 
3659  /* Estimate motion for every MB */
3660  if(s->pict_type != AV_PICTURE_TYPE_I){
3661  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3662  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3663  if (s->pict_type != AV_PICTURE_TYPE_B) {
3664  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3665  s->me_pre == 2) {
3666  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3667  }
3668  }
3669 
3670  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3671  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3672  /* I-Frame */
3673  for(i=0; i<s->mb_stride*s->mb_height; i++)
3674  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3675 
3676  if(!s->fixed_qscale){
3677  /* finding spatial complexity for I-frame rate control */
3678  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3679  }
3680  }
3681  for(i=1; i<context_count; i++){
3682  merge_context_after_me(s, s->thread_context[i]);
3683  }
3684  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3685  s->mb_var_sum = s->me. mb_var_sum_temp;
3686  emms_c();
3687 
3688  if (s->me.scene_change_score > s->scenechange_threshold &&
3689  s->pict_type == AV_PICTURE_TYPE_P) {
3690  s->pict_type= AV_PICTURE_TYPE_I;
3691  for(i=0; i<s->mb_stride*s->mb_height; i++)
3692  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3693  if (s->msmpeg4_version >= MSMP4_V3)
3694  s->no_rounding=1;
3695  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3696  s->mb_var_sum, s->mc_mb_var_sum);
3697  }
3698 
3699  if(!s->umvplus){
3700  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3701  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3702 
3703  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3704  int a,b;
3705  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3706  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3707  s->f_code= FFMAX3(s->f_code, a, b);
3708  }
3709 
3711  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3712  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3713  int j;
3714  for(i=0; i<2; i++){
3715  for(j=0; j<2; j++)
3716  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3717  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3718  }
3719  }
3720  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3721  int a, b;
3722 
3723  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3724  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3725  s->f_code = FFMAX(a, b);
3726 
3727  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3728  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3729  s->b_code = FFMAX(a, b);
3730 
3731  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3732  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3733  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3734  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3735  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3736  int dir, j;
3737  for(dir=0; dir<2; dir++){
3738  for(i=0; i<2; i++){
3739  for(j=0; j<2; j++){
3742  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3743  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3744  }
3745  }
3746  }
3747  }
3748  }
3749  }
3750 
3751  if (estimate_qp(s, 0) < 0)
3752  return -1;
3753 
3754  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3755  s->pict_type == AV_PICTURE_TYPE_I &&
3756  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3757  s->qscale= 3; //reduce clipping problems
3758 
3759  if (s->out_format == FMT_MJPEG) {
3760  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3761  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3762 
3763  if (s->avctx->intra_matrix) {
3764  chroma_matrix =
3765  luma_matrix = s->avctx->intra_matrix;
3766  }
3767  if (s->avctx->chroma_intra_matrix)
3768  chroma_matrix = s->avctx->chroma_intra_matrix;
3769 
3770  /* for mjpeg, we do include qscale in the matrix */
3771  for(i=1;i<64;i++){
3772  int j = s->idsp.idct_permutation[i];
3773 
3774  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3775  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3776  }
3777  s->y_dc_scale_table=
3778  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3779  s->chroma_intra_matrix[0] =
3780  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3781  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3782  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3783  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3784  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3785  s->qscale= 8;
3786 
3787  if (s->codec_id == AV_CODEC_ID_AMV) {
3788  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3789  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3790  for (int i = 1; i < 64; i++) {
3791  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3792 
3793  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3794  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3795  }
3796  s->y_dc_scale_table = y;
3797  s->c_dc_scale_table = c;
3798  s->intra_matrix[0] = 13;
3799  s->chroma_intra_matrix[0] = 14;
3800  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3801  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3802  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3803  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3804  s->qscale = 8;
3805  }
3806  }
3807 
3808  if (s->pict_type == AV_PICTURE_TYPE_I) {
3809  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3810  } else {
3811  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3812  }
3813  s->cur_pic.ptr->f->pict_type = s->pict_type;
3814 
3815  if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3816  s->picture_in_gop_number=0;
3817 
3818  s->mb_x = s->mb_y = 0;
3819  s->last_bits= put_bits_count(&s->pb);
3820  switch(s->out_format) {
3821 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3822  case FMT_MJPEG:
3824  break;
3825 #endif
3826  case FMT_SPEEDHQ:
3827  if (CONFIG_SPEEDHQ_ENCODER)
3829  break;
3830  case FMT_H261:
3831  if (CONFIG_H261_ENCODER)
3833  break;
3834  case FMT_H263:
3835  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3837 #if CONFIG_MSMPEG4ENC
3838  else if (s->msmpeg4_version != MSMP4_UNUSED)
3840 #endif
3841  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3843  if (ret < 0)
3844  return ret;
3845  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3847  if (ret < 0)
3848  return ret;
3849  }
3850  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3852  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3854  else if (CONFIG_H263_ENCODER)
3856  break;
3857  case FMT_MPEG1:
3858  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3860  break;
3861  default:
3862  av_assert0(0);
3863  }
3864  bits= put_bits_count(&s->pb);
3865  s->header_bits= bits - s->last_bits;
3866 
3867  for(i=1; i<context_count; i++){
3868  update_duplicate_context_after_me(s->thread_context[i], s);
3869  }
3870  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3871  for(i=1; i<context_count; i++){
3872  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3873  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3874  merge_context_after_encode(s, s->thread_context[i]);
3875  }
3876  emms_c();
3877  return 0;
3878 }
3879 
3880 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3881  const int intra= s->mb_intra;
3882  int i;
3883 
3884  s->dct_count[intra]++;
3885 
3886  for(i=0; i<64; i++){
3887  int level= block[i];
3888 
3889  if(level){
3890  if(level>0){
3891  s->dct_error_sum[intra][i] += level;
3892  level -= s->dct_offset[intra][i];
3893  if(level<0) level=0;
3894  }else{
3895  s->dct_error_sum[intra][i] -= level;
3896  level += s->dct_offset[intra][i];
3897  if(level>0) level=0;
3898  }
3899  block[i]= level;
3900  }
3901  }
3902 }
3903 
3905  int16_t *block, int n,
3906  int qscale, int *overflow){
3907  const int *qmat;
3908  const uint16_t *matrix;
3909  const uint8_t *scantable;
3910  const uint8_t *perm_scantable;
3911  int max=0;
3912  unsigned int threshold1, threshold2;
3913  int bias=0;
3914  int run_tab[65];
3915  int level_tab[65];
3916  int score_tab[65];
3917  int survivor[65];
3918  int survivor_count;
3919  int last_run=0;
3920  int last_level=0;
3921  int last_score= 0;
3922  int last_i;
3923  int coeff[2][64];
3924  int coeff_count[64];
3925  int qmul, qadd, start_i, last_non_zero, i, dc;
3926  const int esc_length= s->ac_esc_length;
3927  const uint8_t *length, *last_length;
3928  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3929  int mpeg2_qscale;
3930 
3931  s->fdsp.fdct(block);
3932 
3933  if(s->dct_error_sum)
3934  s->denoise_dct(s, block);
3935  qmul= qscale*16;
3936  qadd= ((qscale-1)|1)*8;
3937 
3938  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3939  else mpeg2_qscale = qscale << 1;
3940 
3941  if (s->mb_intra) {
3942  int q;
3943  scantable= s->intra_scantable.scantable;
3944  perm_scantable= s->intra_scantable.permutated;
3945  if (!s->h263_aic) {
3946  if (n < 4)
3947  q = s->y_dc_scale;
3948  else
3949  q = s->c_dc_scale;
3950  q = q << 3;
3951  } else{
3952  /* For AIC we skip quant/dequant of INTRADC */
3953  q = 1 << 3;
3954  qadd=0;
3955  }
3956 
3957  /* note: block[0] is assumed to be positive */
3958  block[0] = (block[0] + (q >> 1)) / q;
3959  start_i = 1;
3960  last_non_zero = 0;
3961  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3962  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3963  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3964  bias= 1<<(QMAT_SHIFT-1);
3965 
3966  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3967  length = s->intra_chroma_ac_vlc_length;
3968  last_length= s->intra_chroma_ac_vlc_last_length;
3969  } else {
3970  length = s->intra_ac_vlc_length;
3971  last_length= s->intra_ac_vlc_last_length;
3972  }
3973  } else {
3974  scantable= s->inter_scantable.scantable;
3975  perm_scantable= s->inter_scantable.permutated;
3976  start_i = 0;
3977  last_non_zero = -1;
3978  qmat = s->q_inter_matrix[qscale];
3979  matrix = s->inter_matrix;
3980  length = s->inter_ac_vlc_length;
3981  last_length= s->inter_ac_vlc_last_length;
3982  }
3983  last_i= start_i;
3984 
3985  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3986  threshold2= (threshold1<<1);
3987 
3988  for(i=63; i>=start_i; i--) {
3989  const int j = scantable[i];
3990  int level = block[j] * qmat[j];
3991 
3992  if(((unsigned)(level+threshold1))>threshold2){
3993  last_non_zero = i;
3994  break;
3995  }
3996  }
3997 
3998  for(i=start_i; i<=last_non_zero; i++) {
3999  const int j = scantable[i];
4000  int level = block[j] * qmat[j];
4001 
4002 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4003 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4004  if(((unsigned)(level+threshold1))>threshold2){
4005  if(level>0){
4006  level= (bias + level)>>QMAT_SHIFT;
4007  coeff[0][i]= level;
4008  coeff[1][i]= level-1;
4009 // coeff[2][k]= level-2;
4010  }else{
4011  level= (bias - level)>>QMAT_SHIFT;
4012  coeff[0][i]= -level;
4013  coeff[1][i]= -level+1;
4014 // coeff[2][k]= -level+2;
4015  }
4016  coeff_count[i]= FFMIN(level, 2);
4017  av_assert2(coeff_count[i]);
4018  max |=level;
4019  }else{
4020  coeff[0][i]= (level>>31)|1;
4021  coeff_count[i]= 1;
4022  }
4023  }
4024 
4025  *overflow= s->max_qcoeff < max; //overflow might have happened
4026 
4027  if(last_non_zero < start_i){
4028  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4029  return last_non_zero;
4030  }
4031 
4032  score_tab[start_i]= 0;
4033  survivor[0]= start_i;
4034  survivor_count= 1;
4035 
4036  for(i=start_i; i<=last_non_zero; i++){
4037  int level_index, j, zero_distortion;
4038  int dct_coeff= FFABS(block[ scantable[i] ]);
4039  int best_score=256*256*256*120;
4040 
4041  if (s->fdsp.fdct == ff_fdct_ifast)
4042  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4043  zero_distortion= dct_coeff*dct_coeff;
4044 
4045  for(level_index=0; level_index < coeff_count[i]; level_index++){
4046  int distortion;
4047  int level= coeff[level_index][i];
4048  const int alevel= FFABS(level);
4049  int unquant_coeff;
4050 
4051  av_assert2(level);
4052 
4053  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4054  unquant_coeff= alevel*qmul + qadd;
4055  } else if(s->out_format == FMT_MJPEG) {
4056  j = s->idsp.idct_permutation[scantable[i]];
4057  unquant_coeff = alevel * matrix[j] * 8;
4058  }else{ // MPEG-1
4059  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4060  if(s->mb_intra){
4061  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4062  unquant_coeff = (unquant_coeff - 1) | 1;
4063  }else{
4064  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4065  unquant_coeff = (unquant_coeff - 1) | 1;
4066  }
4067  unquant_coeff<<= 3;
4068  }
4069 
4070  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4071  level+=64;
4072  if((level&(~127)) == 0){
4073  for(j=survivor_count-1; j>=0; j--){
4074  int run= i - survivor[j];
4075  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4076  score += score_tab[i-run];
4077 
4078  if(score < best_score){
4079  best_score= score;
4080  run_tab[i+1]= run;
4081  level_tab[i+1]= level-64;
4082  }
4083  }
4084 
4085  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4086  for(j=survivor_count-1; j>=0; j--){
4087  int run= i - survivor[j];
4088  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4089  score += score_tab[i-run];
4090  if(score < last_score){
4091  last_score= score;
4092  last_run= run;
4093  last_level= level-64;
4094  last_i= i+1;
4095  }
4096  }
4097  }
4098  }else{
4099  distortion += esc_length*lambda;
4100  for(j=survivor_count-1; j>=0; j--){
4101  int run= i - survivor[j];
4102  int score= distortion + score_tab[i-run];
4103 
4104  if(score < best_score){
4105  best_score= score;
4106  run_tab[i+1]= run;
4107  level_tab[i+1]= level-64;
4108  }
4109  }
4110 
4111  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4112  for(j=survivor_count-1; j>=0; j--){
4113  int run= i - survivor[j];
4114  int score= distortion + score_tab[i-run];
4115  if(score < last_score){
4116  last_score= score;
4117  last_run= run;
4118  last_level= level-64;
4119  last_i= i+1;
4120  }
4121  }
4122  }
4123  }
4124  }
4125 
4126  score_tab[i+1]= best_score;
4127 
4128  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4129  if(last_non_zero <= 27){
4130  for(; survivor_count; survivor_count--){
4131  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4132  break;
4133  }
4134  }else{
4135  for(; survivor_count; survivor_count--){
4136  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4137  break;
4138  }
4139  }
4140 
4141  survivor[ survivor_count++ ]= i+1;
4142  }
4143 
4144  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4145  last_score= 256*256*256*120;
4146  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4147  int score= score_tab[i];
4148  if (i)
4149  score += lambda * 2; // FIXME more exact?
4150 
4151  if(score < last_score){
4152  last_score= score;
4153  last_i= i;
4154  last_level= level_tab[i];
4155  last_run= run_tab[i];
4156  }
4157  }
4158  }
4159 
4160  s->coded_score[n] = last_score;
4161 
4162  dc= FFABS(block[0]);
4163  last_non_zero= last_i - 1;
4164  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4165 
4166  if(last_non_zero < start_i)
4167  return last_non_zero;
4168 
4169  if(last_non_zero == 0 && start_i == 0){
4170  int best_level= 0;
4171  int best_score= dc * dc;
4172 
4173  for(i=0; i<coeff_count[0]; i++){
4174  int level= coeff[i][0];
4175  int alevel= FFABS(level);
4176  int unquant_coeff, score, distortion;
4177 
4178  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4179  unquant_coeff= (alevel*qmul + qadd)>>3;
4180  } else{ // MPEG-1
4181  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4182  unquant_coeff = (unquant_coeff - 1) | 1;
4183  }
4184  unquant_coeff = (unquant_coeff + 4) >> 3;
4185  unquant_coeff<<= 3 + 3;
4186 
4187  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4188  level+=64;
4189  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4190  else score= distortion + esc_length*lambda;
4191 
4192  if(score < best_score){
4193  best_score= score;
4194  best_level= level - 64;
4195  }
4196  }
4197  block[0]= best_level;
4198  s->coded_score[n] = best_score - dc*dc;
4199  if(best_level == 0) return -1;
4200  else return last_non_zero;
4201  }
4202 
4203  i= last_i;
4204  av_assert2(last_level);
4205 
4206  block[ perm_scantable[last_non_zero] ]= last_level;
4207  i -= last_run + 1;
4208 
4209  for(; i>start_i; i -= run_tab[i] + 1){
4210  block[ perm_scantable[i-1] ]= level_tab[i];
4211  }
4212 
4213  return last_non_zero;
4214 }
4215 
4216 static int16_t basis[64][64];
4217 
4218 static void build_basis(uint8_t *perm){
4219  int i, j, x, y;
4220  emms_c();
4221  for(i=0; i<8; i++){
4222  for(j=0; j<8; j++){
4223  for(y=0; y<8; y++){
4224  for(x=0; x<8; x++){
4225  double s= 0.25*(1<<BASIS_SHIFT);
4226  int index= 8*i + j;
4227  int perm_index= perm[index];
4228  if(i==0) s*= sqrt(0.5);
4229  if(j==0) s*= sqrt(0.5);
4230  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4231  }
4232  }
4233  }
4234  }
4235 }
4236 
/**
 * Iteratively refine already-quantized coefficients of one 8x8 block by
 * greedy rate-distortion search: repeatedly try changing each coefficient
 * by +-1 (and toggling it to/from zero) and keep the single change that
 * lowers the combined distortion + lambda*rate score the most, until no
 * change improves the score.
 *
 * @param block  quantized coefficients, updated in place
 * @param weight perceptual weights per coefficient; rescaled in place to 16..63
 * @param orig   reconstructed-domain reference the distortion is measured against
 * @param n      block index (selects luma/chroma DC scale and VLC tables)
 * @param qscale quantizer scale for this macroblock
 * @return index of the last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* lazily build the shared DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* chroma can use a dedicated AC VLC length table when available */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] accumulates the reconstruction error (DC + dequantized ACs
     * minus the reference), kept at RECON_SHIFT precision */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* remap the perceptual weights into 16..63 and derive lambda from
     * their energy so rate and distortion are comparable */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* add the dequantized nonzero coefficients into rem[] and record the
     * zero-run lengths of the current coefficient layout in run_tab[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* main refinement loop: each pass finds the single +-1 change with the
     * best score improvement; stops when none helps */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* forward-DCT of the weighted residual: its sign tells which
             * direction a new coefficient would have to move to help */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        /* intra blocks: also consider nudging the (separately coded) DC */
        if(start_i){
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* without aggressive noise shaping, only look one past the
             * current last nonzero coefficient */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* level change only: rate delta is the VLC length
                         * difference for the same run */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* creating a new +-1 coefficient: splits the run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* skip if the gradient says this sign cannot
                             * reduce the error */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                /* previous coefficient is no longer "last" */
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* removing a +-1 coefficient: merges two runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            /* previous coefficient becomes the new "last" */
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* add the distortion change this coefficient change causes */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change and update all derived state */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                /* may have zeroed the last coefficient: rescan backwards */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            /* rebuild the run-length table for the modified block */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4552 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t tmp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the touched coefficients into tmp and clear the originals,
     * so overlapping source/destination positions cannot clobber data. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        tmp[pos]   = block[pos];
        block[pos] = 0;
    }

    /* Scatter them back through the permutation. */
    for (int i = 0; i <= last; i++) {
        const int pos = scantable[i];
        block[permutation[pos]] = tmp[pos];
    }
}
4588 
4590  int16_t *block, int n,
4591  int qscale, int *overflow)
4592 {
4593  int i, j, level, last_non_zero, q, start_i;
4594  const int *qmat;
4595  const uint8_t *scantable;
4596  int bias;
4597  int max=0;
4598  unsigned int threshold1, threshold2;
4599 
4600  s->fdsp.fdct(block);
4601 
4602  if(s->dct_error_sum)
4603  s->denoise_dct(s, block);
4604 
4605  if (s->mb_intra) {
4606  scantable= s->intra_scantable.scantable;
4607  if (!s->h263_aic) {
4608  if (n < 4)
4609  q = s->y_dc_scale;
4610  else
4611  q = s->c_dc_scale;
4612  q = q << 3;
4613  } else
4614  /* For AIC we skip quant/dequant of INTRADC */
4615  q = 1 << 3;
4616 
4617  /* note: block[0] is assumed to be positive */
4618  block[0] = (block[0] + (q >> 1)) / q;
4619  start_i = 1;
4620  last_non_zero = 0;
4621  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4622  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4623  } else {
4624  scantable= s->inter_scantable.scantable;
4625  start_i = 0;
4626  last_non_zero = -1;
4627  qmat = s->q_inter_matrix[qscale];
4628  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4629  }
4630  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4631  threshold2= (threshold1<<1);
4632  for(i=63;i>=start_i;i--) {
4633  j = scantable[i];
4634  level = block[j] * qmat[j];
4635 
4636  if(((unsigned)(level+threshold1))>threshold2){
4637  last_non_zero = i;
4638  break;
4639  }else{
4640  block[j]=0;
4641  }
4642  }
4643  for(i=start_i; i<=last_non_zero; i++) {
4644  j = scantable[i];
4645  level = block[j] * qmat[j];
4646 
4647 // if( bias+level >= (1<<QMAT_SHIFT)
4648 // || bias-level >= (1<<QMAT_SHIFT)){
4649  if(((unsigned)(level+threshold1))>threshold2){
4650  if(level>0){
4651  level= (bias + level)>>QMAT_SHIFT;
4652  block[j]= level;
4653  }else{
4654  level= (bias - level)>>QMAT_SHIFT;
4655  block[j]= -level;
4656  }
4657  max |=level;
4658  }else{
4659  block[j]=0;
4660  }
4661  }
4662  *overflow= s->max_qcoeff < max; //overflow might have happened
4663 
4664  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4665  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4666  ff_block_permute(block, s->idsp.idct_permutation,
4667  scantable, last_non_zero);
4668 
4669  return last_non_zero;
4670 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1365
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:345
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:687
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:155
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:235
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:190
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:429
encode_picture
static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3591
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegvideoenc.h)
Definition: mpegvideo.h:290
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:48
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:105
level
uint8_t level
Definition: svq3.c:205
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:388
av_clip
#define av_clip
Definition: common.h:100
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3577
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:158
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:541
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:201
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
r
const char * r
Definition: vf_curves.c:127
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:346
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:819
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:229
mem_internal.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:308
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:483
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:292
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1309
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1667
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:143
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:97
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
matrix
Definition: vc1dsp.c:43
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:63
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2680
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4216
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:980
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2724
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:840
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1763
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:263
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:389
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:41
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:331
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:459
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:501
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:38
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:539
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:376
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
data
const char data[16]
Definition: mxf.c:148
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:207
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
prepare_picture
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1152
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:219
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:796
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:88
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:820
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:385
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:557
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:58
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:497
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2705
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2118
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:148
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:968
wmv2enc.h
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1273
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:607
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:594
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:894
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:275
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:52
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:262
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:127
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:36
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:910
MpegEncContext::encoding_error
uint64_t encoding_error[MPV_MAX_PLANES]
Definition: mpegvideo.h:255
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1799
skip_check
static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1324
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:84
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:66
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3541
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
FDCTDSPContext
Definition: fdctdsp.h:28
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:826
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3508
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
frame_start
static void frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1784
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:463
fail
#define fail()
Definition: checkasm.h:188
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:138
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:67
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:43
perm
perm
Definition: f_perms.c:75
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1241
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:61
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:449
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:45
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2835
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:50
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:895
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:343
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:774
RateControlContext
rate control context.
Definition: ratecontrol.h:60
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2812
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:36
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:97
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1094
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4218
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:729
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1111
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:460
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:260
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:863
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:288
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1502
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1585
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:224
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
init_qscale_tab
static void init_qscale_tab(MpegEncContext *s)
init s->cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:240
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1302
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4564
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1537
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:271
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:46
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:861
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2749
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
me_cmp_init
static av_cold int me_cmp_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:309
ff_mpv_alloc_pic_pool
av_cold FFRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1287
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:392
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:51
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:486
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:713
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2792
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:329
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:228
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:307
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:47
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:253
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:853
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:275
ff_dct_encode_init
av_cold void ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:294
mathops.h
dct_quantize_c
static int dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4589
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:341
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3507
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:900
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:709
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1034
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:969
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1337
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpv_reconstruct_mb_template.c:24
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:281
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1314
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1050
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:273
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:266
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:847
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:279
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3880
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1327
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:550
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:847
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1412
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1344
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2142
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
f
f
Definition: af_crystalizer.c:122
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3514
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:289
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:55
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1037
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1184
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2028
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:299
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:609
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1076
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:128
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:202
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:55
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:330
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:280
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:40
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:99
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:60
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:467
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:281
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:425
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:264
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_me_init_pic
void ff_me_init_pic(MpegEncContext *s)
Definition: motion_est.c:370
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:493
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:545
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:286
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:53
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MpegEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:42
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:196
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:461
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:218
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:301
emms.h
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:78
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:252
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:277
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:465
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:459
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3904
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1125
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2869
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1386
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1003
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:429
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:391
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:276
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:113
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:830
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:637
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:197
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:911
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2543
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:424
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:247
AVCodecContext::height
int height
Definition: avcodec.h:624
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:508
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
me_cmp_func
int(* me_cmp_func)(struct MpegEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:292
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1389
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2608
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:53
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:871
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1716
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:476
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:205
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:462
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:868
AVCodecContext
main external API structure.
Definition: avcodec.h:451
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:96
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:901
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1353
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:347
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1266
encode_mb
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2533
AVRational::den
int den
Denominator.
Definition: rational.h:60
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:423
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1668
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:420
set_bframe_chain_length
static int set_bframe_chain_length(MpegEncContext *s)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1538
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:854
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1727
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:866
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:971
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:109
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:37
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:280
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:801
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:112
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:351
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:545
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1316
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4237
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2570
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:261
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1053
ff_h263_update_mb
void ff_h263_update_mb(MpegEncContext *s)
Definition: ituh263enc.c:690
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:970
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AVPacket
This structure stores compressed data.
Definition: packet.h:516
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2084
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:989
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:51
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:434
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2659
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:346
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:460
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:609
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:884
h
h
Definition: vp9dsp_template.c:2070
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:909
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:422
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:59
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:170
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:648
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:344
src
#define src
Definition: vp8dsp.c:248
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pixblockdsp.h
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1614
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:951
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:458
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2772
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:700
intmath.h