FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/mem_internal.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/thread.h"
46 #include "avcodec.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12codecs.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MpegEncContext *s, const AVPacket *pkt);
88 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MpegEncContext *s);
90 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
91 static int dct_quantize_c(MpegEncContext *s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
97 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
98 
99 static const AVOption mpv_generic_options[] = {
102  { NULL },
103 };
104 
106  .class_name = "generic mpegvideo encoder",
107  .item_name = av_default_item_name,
108  .option = mpv_generic_options,
109  .version = LIBAVUTIL_VERSION_INT,
110 };
111 
/**
 * Precompute per-qscale quantization multiplier tables for the encoder.
 *
 * For every qscale in [qmin, qmax] this fills qmat (32-bit reciprocal
 * multipliers used by the C/trellis quantizers) and, for the generic DCT
 * path, qmat16 (16-bit multiplier in [0] and rounding bias in [1], used by
 * the SIMD quantizer). The reciprocals turn per-coefficient division by
 * (qscale2 * quant_matrix[j]) into a multiply-and-shift by QMAT_SHIFT
 * (or QMAT_SHIFT_MMX for the 16-bit table).
 *
 * qscale2 is the doubled qscale, or the MPEG-2 non-linear mapping when
 * s->q_scale_type is set.  The trailing loop only detects whether the
 * chosen shift could overflow 32 bits and warns; it does not rescale.
 *
 * @param s            encoder context (provides fdct selection and the
 *                     IDCT coefficient permutation)
 * @param qmat         output: [qscale][64] reciprocal table
 * @param qmat16       output: [qscale][2][64] 16-bit multiplier/bias table
 *                     (only written on the generic fdct path)
 * @param quant_matrix input quantization matrix (permuted indexing)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin, qmax   inclusive qscale range to fill
 * @param intra        nonzero for intra tables (DC coeff excluded from the
 *                     overflow check)
 */
112 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
113  uint16_t (*qmat16)[2][64],
114  const uint16_t *quant_matrix,
115  int bias, int qmin, int qmax, int intra)
116 {
117  FDCTDSPContext *fdsp = &s->fdsp;
118  int qscale;
119  int shift = 0;
120 
121  for (qscale = qmin; qscale <= qmax; qscale++) {
122  int i;
123  int qscale2;
124 
/* MPEG-2 non-linear quantizer maps qscale through a lookup table;
 * otherwise the effective scale is simply 2*qscale. */
125  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
126  else qscale2 = qscale << 1;
127 
/* Branch 1: "slow"/float DCTs with no built-in scaling. */
128  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
129 #if CONFIG_FAANDCT
130  fdsp->fdct == ff_faandct ||
131 #endif /* CONFIG_FAANDCT */
/* NOTE(review): the scrape lost original line 132 here (the final fdct
 * comparison of this condition and its opening brace) — TODO restore
 * from the upstream file. */
133  for (i = 0; i < 64; i++) {
134  const int j = s->idsp.idct_permutation[i];
135  int64_t den = (int64_t) qscale2 * quant_matrix[j];
136  /* 16 <= qscale * quant_matrix[i] <= 7905
137  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
138  * 19952 <= x <= 249205026
139  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
140  * 3444240 >= (1 << 36) / (x) >= 275 */
141 
142  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
143  }
/* Branch 2: AAN "ifast" DCT outputs are pre-scaled by ff_aanscales,
 * so the scale factor is folded into the divisor (hence the extra
 * +14 bits of shift). */
144  } else if (fdsp->fdct == ff_fdct_ifast) {
145  for (i = 0; i < 64; i++) {
146  const int j = s->idsp.idct_permutation[i];
147  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
148  /* 16 <= qscale * quant_matrix[i] <= 7905
149  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
150  * 19952 <= x <= 249205026
151  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
152  * 3444240 >= (1 << 36) / (x) >= 275 */
153 
154  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
155  }
/* Branch 3: generic path — also fill the 16-bit SIMD tables. */
156  } else {
157  for (i = 0; i < 64; i++) {
158  const int j = s->idsp.idct_permutation[i];
159  int64_t den = (int64_t) qscale2 * quant_matrix[j];
160  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
161  * Assume x = qscale * quant_matrix[i]
162  * So 16 <= x <= 7905
163  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
164  * so 32768 >= (1 << 19) / (x) >= 67 */
165  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
166  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
167  // (qscale * quant_matrix[i]);
168  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
169 
/* Clamp: a multiplier of 0 or exactly 2^15 would break the 16-bit
 * SIMD quantizer, so pin it just below 2^15. */
170  if (qmat16[qscale][0][i] == 0 ||
171  qmat16[qscale][0][i] == 128 * 256)
172  qmat16[qscale][0][i] = 128 * 256 - 1;
173  qmat16[qscale][1][i] =
174  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
175  qmat16[qscale][0][i]);
176  }
177  }
178 
/* Overflow probe: find how many extra shift bits would be needed for
 * the worst-case |coeff| (8191, rescaled for ifast) — warn only. */
179  for (i = intra; i < 64; i++) {
180  int64_t max = 8191;
181  if (fdsp->fdct == ff_fdct_ifast) {
182  max = (8191LL * ff_aanscales[i]) >> 14;
183  }
184  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
185  shift++;
186  }
187  }
188  }
189  if (shift) {
190  av_log(s->avctx, AV_LOG_INFO,
191  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
192  QMAT_SHIFT - shift);
193  }
194 }
195 
/**
 * Derive s->qscale (and s->lambda2) from the current s->lambda.
 *
 * qscale ≈ lambda * 139 / 2^(FF_LAMBDA_SHIFT+7), clipped to the user's
 * [qmin, qmax] range (qmax is widened to 31 while VBV emergency mode,
 * s->vbv_ignore_qmax, is active).
 */
196 static inline void update_qscale(MpegEncContext *s)
197 {
/* NOTE(review): the `&& 0` makes this non-linear-quant search branch
 * intentionally dead code — the linear mapping below is always used. */
198  if (s->q_scale_type == 1 && 0) {
199  int i;
200  int bestdiff=INT_MAX;
201  int best = 1;
202 
/* Pick the non-linear table index whose effective scale is closest
 * to the scale implied by lambda, honoring qmin/qmax. */
203  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
204  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
205  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
206  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
207  continue;
208  if (diff < bestdiff) {
209  bestdiff = diff;
210  best = i;
211  }
212  }
213  s->qscale = best;
214  } else {
215  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
216  (FF_LAMBDA_SHIFT + 7);
217  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
218  }
219 
/* lambda2 = lambda^2 in FF_LAMBDA_SHIFT fixed point, rounded. */
220  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* NOTE(review): the scrape lost original line 221 here (the closing
 * `FF_LAMBDA_SHIFT;` of this statement) — TODO restore upstream. */
222 }
223 
225 {
226  int i;
227 
228  if (matrix) {
229  put_bits(pb, 1, 1);
230  for (i = 0; i < 64; i++) {
232  }
233  } else
234  put_bits(pb, 1, 0);
235 }
236 
237 /**
238  * init s->cur_pic.qscale_table from s->lambda_table
239  */
/* NOTE(review): the scrape lost original line 240 here (this function's
 * signature, taking MpegEncContext *s). TODO restore from upstream. */
241 {
242  int8_t * const qscale_table = s->cur_pic.qscale_table;
243  int i;
244 
/* Same lambda->qscale mapping as update_qscale(), applied per macroblock
 * (mb_index2xy converts MB index to the stride-based table position). */
245  for (i = 0; i < s->mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
249  s->avctx->qmax);
250  }
251 }
252 
/* NOTE(review): the scrape lost original line 253 here (the first line of
 * this function's signature; the visible line below is its continuation).
 * It copies per-picture coding parameters from the main context into a
 * slice-thread duplicate context after motion estimation. */
254  const MpegEncContext *src)
255 {
256 #define COPY(a) dst->a= src->a
257  COPY(pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
269 static void mpv_encode_init_static(void)
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MpegEncContext to defaults for encoding.
277  * the changed fields will not depend upon the prior state of the MpegEncContext.
278  */
/* NOTE(review): the scrape lost original line 279 (this function's
 * signature) and line 283 (presumably the call applying the common
 * decoder/encoder defaults) — TODO restore from upstream. */
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
/* Static tables are shared between all encoder instances; build them once. */
285  ff_thread_once(&init_static_once, mpv_encode_init_static);
286 
287  s->me.mv_penalty = default_mv_penalty;
288  s->fcode_tab = default_fcode_tab;
289 
290  s->input_picture_number = 0;
291  s->picture_in_gop_number = 0;
292 }
293 
/* NOTE(review): the scrape lost original line 294 here (this function's
 * signature).  It selects the DCT quantizer implementation: C defaults,
 * then optional per-architecture overrides, then the trellis quantizer
 * when requested. */
295 {
296  s->dct_quantize = dct_quantize_c;
297  s->denoise_dct = denoise_dct_c;
298 
/* NOTE(review): scrape lost the arch-specific init calls on original
 * lines 300 and 302 (MIPS and x86 overrides). */
299 #if ARCH_MIPS
301 #elif ARCH_X86
303 #endif
304 
/* Trellis quantization replaces whatever the arch init installed. */
305  if (s->avctx->trellis)
306  s->dct_quantize = dct_quantize_trellis_c;
307 }
308 
/* NOTE(review): the scrape lost original line 309 here (this function's
 * signature).  It initializes the motion-estimation comparison function
 * pointers (SAD/SSE/NSSE, frame-skip and interlaced-DCT metrics) from a
 * temporary MECmpContext.  Returns 0 on success or a negative AVERROR. */
310 {
311  MECmpContext mecc;
312  me_cmp_func me_cmp[6];
313  int ret;
314 
315  ff_me_cmp_init(&mecc, avctx);
316  ret = ff_me_init(&s->me, avctx, &mecc, 1);
317  if (ret < 0)
318  return ret;
/* Resolve the user-selected frame-skip metric; index 1 = 8x8 variant. */
319  ret = ff_set_cmp(&mecc, me_cmp, s->frame_skip_cmp, 1);
320  if (ret < 0)
321  return ret;
322  s->frame_skip_cmp_fn = me_cmp[1];
/* NOTE(review): scrape lost original line 323 here (the condition opening
 * this interlaced-DCT block, presumably testing AV_CODEC_FLAG_INTERLACED_DCT). */
324  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
325  if (ret < 0)
326  return ret;
327  if (!me_cmp[0] || !me_cmp[4])
328  return AVERROR(EINVAL);
329  s->ildct_cmp[0] = me_cmp[0];
330  s->ildct_cmp[1] = me_cmp[4];
331  }
332 
333  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
334 
/* [0] = 16x16 variant, [1] = 8x8 variant for each metric. */
335  s->sse_cmp[0] = mecc.sse[0];
336  s->sse_cmp[1] = mecc.sse[1];
337  s->sad_cmp[0] = mecc.sad[0];
338  s->sad_cmp[1] = mecc.sad[1];
/* NSSE is only used for MB decision when explicitly requested;
 * otherwise plain SSE stands in. */
339  if (avctx->mb_cmp == FF_CMP_NSSE) {
340  s->n_sse_cmp[0] = mecc.nsse[0];
341  s->n_sse_cmp[1] = mecc.nsse[1];
342  } else {
343  s->n_sse_cmp[0] = mecc.sse[0];
344  s->n_sse_cmp[1] = mecc.sse[1];
345  }
346 
347  return 0;
348 }
349 
350 /* init video encoder */
/* NOTE(review): this is ff_mpv_encode_init().  The doxygen scrape that
 * produced this file lost many original source lines throughout the body
 * (signature line 351/353, and numerous condition/call lines noted below
 * by gaps in the embedded numbering) — restore them from the upstream
 * FFmpeg tree before compiling.  The function validates the user's
 * AVCodecContext settings, selects the output format per codec id,
 * allocates all encoder-side tables, initializes the quant matrices and
 * rate control, and publishes CPB properties.  Returns 0 or AVERROR. */
352 {
354  AVCPBProperties *cpb_props;
355  int i, ret;
356  int mb_array_size, mv_table_size;
357 
359 
/* --- chroma subsampling selection from the input pixel format --- */
360  switch (avctx->pix_fmt) {
361  case AV_PIX_FMT_YUVJ444P:
362  case AV_PIX_FMT_YUV444P:
363  s->chroma_format = CHROMA_444;
364  break;
365  case AV_PIX_FMT_YUVJ422P:
366  case AV_PIX_FMT_YUV422P:
367  s->chroma_format = CHROMA_422;
368  break;
369  case AV_PIX_FMT_YUVJ420P:
370  case AV_PIX_FMT_YUV420P:
371  default:
372  s->chroma_format = CHROMA_420;
373  break;
374  }
375 
377 
/* --- copy basic user parameters into the encoder context --- */
378  s->bit_rate = avctx->bit_rate;
379  s->width = avctx->width;
380  s->height = avctx->height;
381  if (avctx->gop_size > 600 &&
384  "keyframe interval too large!, reducing it from %d to %d\n",
385  avctx->gop_size, 600);
386  avctx->gop_size = 600;
387  }
388  s->gop_size = avctx->gop_size;
389  s->avctx = avctx;
391  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
392  "is %d.\n", MAX_B_FRAMES);
394  } else if (avctx->max_b_frames < 0) {
396  "max b frames must be 0 or positive for mpegvideo based encoders\n");
397  return AVERROR(EINVAL);
398  }
399  s->max_b_frames = avctx->max_b_frames;
400  s->codec_id = avctx->codec->id;
401  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
402  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
403  return AVERROR(EINVAL);
404  }
405 
406  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
407  s->rtp_mode = !!s->rtp_payload_size;
408  s->intra_dc_precision = avctx->intra_dc_precision;
409 
410  // workaround some differences between how applications specify dc precision
411  if (s->intra_dc_precision < 0) {
412  s->intra_dc_precision += 8;
413  } else if (s->intra_dc_precision >= 8)
414  s->intra_dc_precision -= 8;
415 
416  if (s->intra_dc_precision < 0) {
418  "intra dc precision must be positive, note some applications use"
419  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
420  return AVERROR(EINVAL);
421  }
422 
/* Only MPEG-2 supports raised DC precision (up to 11 bits, i.e. value 3). */
423  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
424  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
425  return AVERROR(EINVAL);
426  }
427  s->user_specified_pts = AV_NOPTS_VALUE;
428 
429  if (s->gop_size <= 1) {
430  s->intra_only = 1;
431  s->gop_size = 12;
432  } else {
433  s->intra_only = 0;
434  }
435 
436  /* Fixed QSCALE */
437  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
438 
/* Adaptive quantization kicks in when any masking option or QP-RD is
 * enabled — unless the user forced a fixed qscale. */
439  s->adaptive_quant = (avctx->lumi_masking ||
440  avctx->dark_masking ||
443  avctx->p_masking ||
444  s->border_masking ||
445  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
446  !s->fixed_qscale;
447 
448  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
449 
/* --- auto-pick a VBV buffer size when max_rate is set but no buffer --- */
451  switch(avctx->codec_id) {
454  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
455  break;
456  case AV_CODEC_ID_MPEG4:
/* VBV sizes interpolated between the profile/level anchor points. */
460  if (avctx->rc_max_rate >= 15000000) {
461  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
462  } else if(avctx->rc_max_rate >= 2000000) {
463  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
464  } else if(avctx->rc_max_rate >= 384000) {
465  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
466  } else
467  avctx->rc_buffer_size = 40;
468  avctx->rc_buffer_size *= 16384;
469  break;
470  }
471  if (avctx->rc_buffer_size) {
472  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
473  }
474  }
475 
/* --- rate-control sanity checks --- */
476  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
477  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
478  return AVERROR(EINVAL);
479  }
480 
483  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
484  }
485 
487  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
488  return AVERROR(EINVAL);
489  }
490 
492  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
493  return AVERROR(EINVAL);
494  }
495 
496  if (avctx->rc_max_rate &&
500  "impossible bitrate constraints, this will fail\n");
501  }
502 
503  if (avctx->rc_buffer_size &&
506  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
507  return AVERROR(EINVAL);
508  }
509 
/* Widen an unusably small bitrate tolerance to ~5 frame durations. */
510  if (!s->fixed_qscale &&
513  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
515  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
516  if (nbt <= INT_MAX) {
517  avctx->bit_rate_tolerance = nbt;
518  } else
519  avctx->bit_rate_tolerance = INT_MAX;
520  }
521 
/* MPEG-1/2 vbv_delay is a 16-bit 90 kHz field; an oversized buffer would
 * overflow it, so warn that VBR signalling (0xFFFF) will be used. */
522  if (avctx->rc_max_rate &&
524  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
525  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
526  90000LL * (avctx->rc_buffer_size - 1) >
527  avctx->rc_max_rate * 0xFFFFLL) {
529  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
530  "specified vbv buffer is too large for the given bitrate!\n");
531  }
532 
/* --- per-codec feature availability checks --- */
533  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
534  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
535  s->codec_id != AV_CODEC_ID_FLV1) {
536  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
537  return AVERROR(EINVAL);
538  }
539 
540  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
542  "OBMC is only supported with simple mb decision\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
547  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
548  return AVERROR(EINVAL);
549  }
550 
551  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
552  s->codec_id == AV_CODEC_ID_H263 ||
553  s->codec_id == AV_CODEC_ID_H263P) &&
554  (avctx->sample_aspect_ratio.num > 255 ||
555  avctx->sample_aspect_ratio.den > 255)) {
557  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
561  }
562 
563  if ((s->codec_id == AV_CODEC_ID_H263 ||
564  s->codec_id == AV_CODEC_ID_H263P) &&
565  (avctx->width > 2048 ||
566  avctx->height > 1152 )) {
567  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
568  return AVERROR(EINVAL);
569  }
570  if ((s->codec_id == AV_CODEC_ID_H263 ||
571  s->codec_id == AV_CODEC_ID_H263P ||
572  s->codec_id == AV_CODEC_ID_RV20) &&
573  ((avctx->width &3) ||
574  (avctx->height&3) )) {
575  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
576  return AVERROR(EINVAL);
577  }
578 
579  if (s->codec_id == AV_CODEC_ID_RV10 &&
580  (avctx->width &15 ||
581  avctx->height&15 )) {
582  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
583  return AVERROR(EINVAL);
584  }
585 
586  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
587  s->codec_id == AV_CODEC_ID_WMV2) &&
588  avctx->width & 1) {
589  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
590  return AVERROR(EINVAL);
591  }
592 
594  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
595  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
596  return AVERROR(EINVAL);
597  }
598 
599  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
600  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
601  return AVERROR(EINVAL);
602  }
603 
604  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
606  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
607  return AVERROR(EINVAL);
608  }
609 
610  if (s->scenechange_threshold < 1000000000 &&
613  "closed gop with scene change detection are not supported yet, "
614  "set threshold to 1000000000\n");
615  return AVERROR_PATCHWELCOME;
616  }
617 
619  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
622  "low delay forcing is only available for mpeg2, "
623  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
624  return AVERROR(EINVAL);
625  }
626  if (s->max_b_frames != 0) {
628  "B-frames cannot be used with low delay\n");
629  return AVERROR(EINVAL);
630  }
631  }
632 
633  if (s->q_scale_type == 1) {
634  if (avctx->qmax > 28) {
636  "non linear quant only supports qmax <= 28 currently\n");
637  return AVERROR_PATCHWELCOME;
638  }
639  }
640 
641  if (avctx->slices > 1 &&
643  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
644  return AVERROR(EINVAL);
645  }
646 
647  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
649  "notice: b_frame_strategy only affects the first pass\n");
650  s->b_frame_strategy = 0;
651  }
652 
/* Reduce the time base by the GCD of its terms (computed on the
 * scrape-lost line 653) so the MPEG-4 time_increment_bits stay small. */
654  if (i > 1) {
655  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
656  avctx->time_base.den /= i;
657  avctx->time_base.num /= i;
658  //return -1;
659  }
660 
/* --- default quantizer rounding biases per codec family --- */
661  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
662  // (a + x * 3 / 8) / x
663  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
664  s->inter_quant_bias = 0;
665  } else {
666  s->intra_quant_bias = 0;
667  // (a - x / 4) / x
668  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
669  }
670 
671  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
672  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
673  return AVERROR(EINVAL);
674  }
675 
676  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
677 
/* MPEG-4's vop_time_increment_resolution is a 16-bit field. */
678  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
679  avctx->time_base.den > (1 << 16) - 1) {
681  "timebase %d/%d not supported by MPEG 4 standard, "
682  "the maximum admitted value for the timebase denominator "
683  "is %d\n", avctx->time_base.num, avctx->time_base.den,
684  (1 << 16) - 1);
685  return AVERROR(EINVAL);
686  }
687  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
688 
/* --- per-codec output format, delay, and feature setup --- */
689  switch (avctx->codec->id) {
690 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
692  s->rtp_mode = 1;
693  /* fallthrough */
695  s->out_format = FMT_MPEG1;
696  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
697  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
699  break;
700 #endif
701 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
702  case AV_CODEC_ID_MJPEG:
703  case AV_CODEC_ID_AMV:
704  s->out_format = FMT_MJPEG;
705  s->intra_only = 1; /* force intra only for jpeg */
706  if ((ret = ff_mjpeg_encode_init(s)) < 0)
707  return ret;
708  avctx->delay = 0;
709  s->low_delay = 1;
710  break;
711 #endif
712  case AV_CODEC_ID_SPEEDHQ:
713  s->out_format = FMT_SPEEDHQ;
714  s->intra_only = 1; /* force intra only for SHQ */
715  if (!CONFIG_SPEEDHQ_ENCODER)
717  if ((ret = ff_speedhq_encode_init(s)) < 0)
718  return ret;
719  avctx->delay = 0;
720  s->low_delay = 1;
721  break;
722  case AV_CODEC_ID_H261:
723  if (!CONFIG_H261_ENCODER)
726  if (ret < 0)
727  return ret;
728  s->out_format = FMT_H261;
729  avctx->delay = 0;
730  s->low_delay = 1;
731  s->rtp_mode = 0; /* Sliced encoding not supported */
732  break;
733  case AV_CODEC_ID_H263:
734  if (!CONFIG_H263_ENCODER)
737  s->width, s->height) == 8) {
739  "The specified picture size of %dx%d is not valid for "
740  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
741  "352x288, 704x576, and 1408x1152. "
742  "Try H.263+.\n", s->width, s->height);
743  return AVERROR(EINVAL);
744  }
745  s->out_format = FMT_H263;
746  avctx->delay = 0;
747  s->low_delay = 1;
748  break;
749  case AV_CODEC_ID_H263P:
750  s->out_format = FMT_H263;
751  s->h263_plus = 1;
752  /* Fx */
753  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
754  s->modified_quant = s->h263_aic;
755  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
756  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
757  s->flipflop_rounding = 1;
758 
759  /* /Fx */
760  /* These are just to be sure */
761  avctx->delay = 0;
762  s->low_delay = 1;
763  break;
764  case AV_CODEC_ID_FLV1:
765  s->out_format = FMT_H263;
766  s->h263_flv = 2; /* format = 1; 11-bit codes */
767  s->unrestricted_mv = 1;
768  s->rtp_mode = 0; /* don't allow GOB */
769  avctx->delay = 0;
770  s->low_delay = 1;
771  break;
772  case AV_CODEC_ID_RV10:
773  s->out_format = FMT_H263;
774  avctx->delay = 0;
775  s->low_delay = 1;
776  break;
777  case AV_CODEC_ID_RV20:
778  s->out_format = FMT_H263;
779  avctx->delay = 0;
780  s->low_delay = 1;
781  s->modified_quant = 1;
782  s->h263_aic = 1;
783  s->h263_plus = 1;
784  s->loop_filter = 1;
785  s->unrestricted_mv = 0;
786  break;
787  case AV_CODEC_ID_MPEG4:
788  s->out_format = FMT_H263;
789  s->h263_pred = 1;
790  s->unrestricted_mv = 1;
791  s->flipflop_rounding = 1;
792  s->low_delay = s->max_b_frames ? 0 : 1;
793  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
794  break;
/* NOTE(review): scrape lost the MSMPEG4v2/v3 case labels (original
 * lines 795 and 803). */
796  s->out_format = FMT_H263;
797  s->h263_pred = 1;
798  s->unrestricted_mv = 1;
799  s->msmpeg4_version = MSMP4_V2;
800  avctx->delay = 0;
801  s->low_delay = 1;
802  break;
804  s->out_format = FMT_H263;
805  s->h263_pred = 1;
806  s->unrestricted_mv = 1;
807  s->msmpeg4_version = MSMP4_V3;
808  s->flipflop_rounding = 1;
809  avctx->delay = 0;
810  s->low_delay = 1;
811  break;
812  case AV_CODEC_ID_WMV1:
813  s->out_format = FMT_H263;
814  s->h263_pred = 1;
815  s->unrestricted_mv = 1;
816  s->msmpeg4_version = MSMP4_WMV1;
817  s->flipflop_rounding = 1;
818  avctx->delay = 0;
819  s->low_delay = 1;
820  break;
821  case AV_CODEC_ID_WMV2:
822  s->out_format = FMT_H263;
823  s->h263_pred = 1;
824  s->unrestricted_mv = 1;
825  s->msmpeg4_version = MSMP4_WMV2;
826  s->flipflop_rounding = 1;
827  avctx->delay = 0;
828  s->low_delay = 1;
829  break;
830  default:
831  return AVERROR(EINVAL);
832  }
833 
834  avctx->has_b_frames = !s->low_delay;
835 
836  s->encoding = 1;
837 
838  s->progressive_frame =
839  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
841  s->alternate_scan);
842 
843  if (s->lmin > s->lmax) {
844  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
845  s->lmin = s->lmax;
846  }
847 
848  /* init */
850  if ((ret = ff_mpv_common_init(s)) < 0)
851  return ret;
852 
/* --- DSP contexts and comparison functions --- */
853  ff_fdctdsp_init(&s->fdsp, avctx);
854  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
855  ff_pixblockdsp_init(&s->pdsp, avctx);
856  ret = me_cmp_init(s, avctx);
857  if (ret < 0)
858  return ret;
859 
/* --- allocate encoder tables; any failure returns ENOMEM and cleanup
 * is left to ff_mpv_encode_end() via the caller --- */
860  if (!(avctx->stats_out = av_mallocz(256)) ||
861  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
862  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
863  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
864  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
865  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
866  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
867  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
868  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
869  !(s->new_pic = av_frame_alloc()) ||
870  !(s->picture_pool = ff_mpv_alloc_pic_pool(0)))
871  return AVERROR(ENOMEM);
872 
873  /* Allocate MV tables; the MV and MB tables will be copied
874  * to slice contexts by ff_update_duplicate_context(). */
875  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
876  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
877  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
878  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
879  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
880  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
881  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
882  return AVERROR(ENOMEM);
/* The usable table pointers skip one row plus one entry of padding. */
883  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
884  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
885  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
886  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
887  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
888  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
889 
890  /* Allocate MB type table */
891  mb_array_size = s->mb_stride * s->mb_height;
892  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
893  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
894  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
895  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size) ||
896  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
897  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
898  !(s->mb_mean = av_mallocz(mb_array_size)))
899  return AVERROR(ENOMEM);
900 
/* Field-MV tables: one shared allocation sliced into the 2x2x2 (and 2x2)
 * pointer grids below. */
901 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
902  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
903  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
904  int16_t (*tmp1)[2];
905  uint8_t *tmp2;
906  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
907  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
908  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
909  return AVERROR(ENOMEM);
910 
911  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
912  tmp1 += s->mb_stride + 1;
913 
914  for (int i = 0; i < 2; i++) {
915  for (int j = 0; j < 2; j++) {
916  for (int k = 0; k < 2; k++) {
917  s->b_field_mv_table[i][j][k] = tmp1;
918  tmp1 += mv_table_size;
919  }
920  s->b_field_select_table[i][j] = tmp2;
921  tmp2 += 2 * mv_table_size;
922  }
923  }
924  }
925 
926  if (s->noise_reduction) {
927  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
928  return AVERROR(ENOMEM);
929  }
930 
932 
/* --- select the (un)quantizer family used by the reconstruction path --- */
933  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
934  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
935  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
936  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
937  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
938  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
939  } else {
940  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
941  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
942  }
943 
944  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
945  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
946 
947  if (s->slice_context_count > 1) {
948  s->rtp_mode = 1;
949 
951  s->h263_slice_structured = 1;
952  }
953 
954  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
956 #if CONFIG_MSMPEG4ENC
957  if (s->msmpeg4_version != MSMP4_UNUSED)
959 #endif
960  }
961 
962  /* init q matrix */
963  for (i = 0; i < 64; i++) {
964  int j = s->idsp.idct_permutation[i];
965  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
966  s->mpeg_quant) {
967  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
968  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
969  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
970  s->intra_matrix[j] =
971  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
972  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
973  s->intra_matrix[j] =
974  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
975  } else {
976  /* MPEG-1/2 */
977  s->chroma_intra_matrix[j] =
978  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
979  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
980  }
/* User-supplied matrices override the codec defaults. */
981  if (avctx->intra_matrix)
982  s->intra_matrix[j] = avctx->intra_matrix[i];
983  if (avctx->inter_matrix)
984  s->inter_matrix[j] = avctx->inter_matrix[i];
985  }
986 
987  /* precompute matrix */
988  /* for mjpeg, we do include qscale in the matrix */
989  if (s->out_format != FMT_MJPEG) {
990  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
991  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
992  31, 1);
993  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
994  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
995  31, 0);
996  }
997 
998  if ((ret = ff_rate_control_init(s)) < 0)
999  return ret;
1000 
/* b_frame_strategy 2 needs downscaled temp frames for lookahead. */
1001  if (s->b_frame_strategy == 2) {
1002  for (i = 0; i < s->max_b_frames + 2; i++) {
1003  s->tmp_frames[i] = av_frame_alloc();
1004  if (!s->tmp_frames[i])
1005  return AVERROR(ENOMEM);
1006 
1007  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1008  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1009  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1010 
1011  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1012  if (ret < 0)
1013  return ret;
1014  }
1015  }
1016 
/* Publish coded-picture-buffer properties as packet side data. */
1017  cpb_props = ff_encode_add_cpb_side_data(avctx);
1018  if (!cpb_props)
1019  return AVERROR(ENOMEM);
1020  cpb_props->max_bitrate = avctx->rc_max_rate;
1021  cpb_props->min_bitrate = avctx->rc_min_rate;
1022  cpb_props->avg_bitrate = avctx->bit_rate;
1023  cpb_props->buffer_size = avctx->rc_buffer_size;
1024 
1025  return 0;
1026 }
1027 
/* NOTE(review): this is ff_mpv_encode_end() — the scrape lost its
 * signature (original line 1028) and the local-context line 1030.
 * It releases everything ff_mpv_encode_init() allocated; every free is
 * unconditional/NULL-safe so it is valid after a partial init. */
1029 {
1031  int i;
1032 
1033  ff_rate_control_uninit(&s->rc_context);
1034 
1036  ff_refstruct_pool_uninit(&s->picture_pool);
1037 
/* Both arrays are allocated together, so checking both guards against
 * a partially-initialized context. */
1038  if (s->input_picture && s->reordered_input_picture) {
1039  for (int i = 0; i < MAX_B_FRAMES + 1; i++) {
1040  ff_refstruct_unref(&s->input_picture[i]);
1041  ff_refstruct_unref(&s->reordered_input_picture[i]);
1042  }
1043  }
1044  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1045  av_frame_free(&s->tmp_frames[i]);
1046 
1047  av_frame_free(&s->new_pic);
1048 
1050 
1051  av_freep(&s->p_mv_table_base);
1052  av_freep(&s->b_forw_mv_table_base);
1053  av_freep(&s->b_back_mv_table_base);
1054  av_freep(&s->b_bidir_forw_mv_table_base);
1055  av_freep(&s->b_bidir_back_mv_table_base);
1056  av_freep(&s->b_direct_mv_table_base);
1057  av_freep(&s->b_field_mv_table_base);
1058  av_freep(&s->b_field_select_table[0][0]);
1059  av_freep(&s->p_field_select_table[0]);
1060 
1061  av_freep(&s->mb_type);
1062  av_freep(&s->lambda_table);
1063 
1064  av_freep(&s->cplx_tab);
1065  av_freep(&s->bits_tab);
1066 
/* The chroma tables may alias the luma tables (MJPEG path); only free
 * them when they are distinct allocations, then clear the aliases. */
1067  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1068  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1069  s->q_chroma_intra_matrix= NULL;
1070  s->q_chroma_intra_matrix16= NULL;
1071  av_freep(&s->q_intra_matrix);
1072  av_freep(&s->q_inter_matrix);
1073  av_freep(&s->q_intra_matrix16);
1074  av_freep(&s->q_inter_matrix16);
1075  av_freep(&s->input_picture);
1076  av_freep(&s->reordered_input_picture);
1077  av_freep(&s->dct_offset);
1078  av_freep(&s->mb_var);
1079  av_freep(&s->mc_mb_var);
1080  av_freep(&s->mb_mean);
1081 
1082  return 0;
1083 }
1084 
1085 #define IS_ENCODER 1
1087 
/**
 * Debug wrapper for macroblock reconstruction: when FF_DEBUG_DCT_COEFF is
 * set, dumps the DCT coefficients of the current macroblock before it is
 * reconstructed.
 * NOTE(review): the line invoking the actual reconstruction (from the
 * included template) is missing from this excerpt — confirm upstream.
 */
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            /* Coefficients are stored in IDCT-permuted order; index
             * through idct_permutation so they print in natural order. */
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

}
1104 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value (typically the block mean).
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1118 
1119 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1120  const uint8_t *ref, int stride)
1121 {
1122  int x, y, w, h;
1123  int acc = 0;
1124 
1125  w = s->width & ~15;
1126  h = s->height & ~15;
1127 
1128  for (y = 0; y < h; y += 16) {
1129  for (x = 0; x < w; x += 16) {
1130  int offset = x + y * stride;
1131  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1132  stride, 16);
1133  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1134  int sae = get_sae(src + offset, mean, stride);
1135 
1136  acc += sae + 500 < sad;
1137  }
1138  }
1139  return acc;
1140 }
1141 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The frame is allocated oversized by EDGE_WIDTH on every side; the data
 * pointers are then advanced past the border so callers see the nominal
 * coded dimensions.
 *
 * NOTE(review): the call that performs the allocation and sets `ret`
 * (between setting the padded dimensions and the first error check) is
 * missing from this excerpt — confirm against upstream.
 */
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    /* Allocate with an EDGE_WIDTH border on all four sides. */
    f->width  = avctx->width  + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->linesize, &s->uvlinesize);
    if (ret < 0)
        return ret;

    /* Skip past the top and left border in every plane; chroma planes
     * use a border scaled by the chroma shifts. */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
        f->data[i] += offset;
    }
    /* Restore the visible dimensions. */
    f->width  = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1177 
/**
 * Take one user-supplied frame (or NULL at flush time) and append it to
 * the encoder's input queue, either by referencing it directly ("direct"
 * mode) or by copying it into an edge-padded pool picture.
 *
 * @param pic_arg  next input frame, or NULL to signal flushing
 * @return 0 on success, a negative AVERROR on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!s->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* pts must be strictly monotonic */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* Remember the first pts step so dts of the first packet
                 * can be derived later. */
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* No pts given: extrapolate from the previous one, or fall
             * back to the display picture number. */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" mode references the user's buffer instead of copying;
         * only possible when strides, dimensions and alignment match the
         * encoder's expectations. */
        if (pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        pic = ff_refstruct_pool_get(s->picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* Copy the frame into a freshly-allocated, edge-padded
             * pool picture, plane by plane. */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                int h_shift = i ? s->chroma_x_shift : 0;
                int v_shift = i ? s->chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->width , h_shift);
                int h = AV_CEIL_RSHIFT(s->height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* Interlaced MPEG-2 rounds the coded height to 32; pad
                 * deeper so the extra rows contain valid pixels. */
                if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->progressive_sequence
                    && FFALIGN(s->height, 32) - s->height > 16)
                    vpad = 32;

                if (!s->avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* Replicate the last row/column into the padding when
                 * the picture is not macroblock-aligned. */
                if ((s->width & 15) || (s->height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!s->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure s->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];
    for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i] = NULL;

    s->input_picture[encoding_delay] = pic;

    return 0;
fail:
    ff_refstruct_unref(&pic);
    return ret;
}
1317 
1318 static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
1319 {
1320  int x, y, plane;
1321  int score = 0;
1322  int64_t score64 = 0;
1323 
1324  for (plane = 0; plane < 3; plane++) {
1325  const int stride = p->f->linesize[plane];
1326  const int bw = plane ? 1 : 2;
1327  for (y = 0; y < s->mb_height * bw; y++) {
1328  for (x = 0; x < s->mb_width * bw; x++) {
1329  int off = p->shared ? 0 : 16;
1330  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1331  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1332  int v = s->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1333 
1334  switch (FFABS(s->frame_skip_exp)) {
1335  case 0: score = FFMAX(score, v); break;
1336  case 1: score += FFABS(v); break;
1337  case 2: score64 += v * (int64_t)v; break;
1338  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1339  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1340  }
1341  }
1342  }
1343  }
1344  emms_c();
1345 
1346  if (score)
1347  score64 = score;
1348  if (s->frame_skip_exp < 0)
1349  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1350  -1.0/s->frame_skip_exp);
1351 
1352  if (score64 < s->frame_skip_threshold)
1353  return 1;
1354  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1355  return 1;
1356  return 0;
1357 }
1358 
/* NOTE(review): the signature line and the encoder send/receive calls are
 * missing from this excerpt; judging from the body this helper feeds one
 * frame to a throw-away encoder context and drains it, returning the
 * total compressed size in bytes (or a negative AVERROR). */
{
    int ret;
    int size = 0;

    /* NOTE(review): the call that sets `ret` here (presumably submitting
     * the frame to the encoder) is missing from this excerpt. */
    if (ret < 0)
        return ret;

    do {
        /* NOTE(review): the packet-receiving call that updates `ret` is
         * missing from this excerpt. */
        if (ret >= 0) {
            size += pkt->size;
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    /* total size of all packets produced for this frame */
    return size;
}
1379 
/* NOTE(review): the signature line is missing from this excerpt; this is
 * the b_frame_strategy == 2 helper: it encodes downscaled versions of the
 * queued input pictures with every possible B-frame count and returns the
 * count with the lowest rate-distortion cost (or a negative AVERROR). */
{
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    /* NOTE(review): the shift-amount line of this expression is missing
     * from this excerpt. */
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>

    /* Downscale the reference (i == 0) and all queued input pictures
     * into tmp_frames for the trial encodes. */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
                                              s->next_pic.ptr;

        if (pre_input_ptr) {
            const uint8_t *data[4];
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input_ptr->shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input_ptr->f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input_ptr->f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input_ptr->f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* Trial-encode with j B-frames between references, for each j. */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* NOTE(review): the context-allocation call that sets `c` is
         * missing from this excerpt. */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width = width;
        c->height = height;
        /* NOTE(review): the line initializing c->flags is missing from
         * this excerpt. */
        c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision = s->avctx->mb_decision;
        c->me_cmp = s->avctx->me_cmp;
        c->mb_cmp = s->avctx->mb_cmp;
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, s->avctx->codec, NULL);
        if (ret < 0)
            goto fail;


        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            /* every (j+1)-th picture, and the final one, is a P frame */
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            /* NOTE(review): the value line of this ternary (the picture
             * types assigned) is missing from this excerpt. */
            s->tmp_frames[i + 1]->pict_type = is_p ?
            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* NOTE(review): the flush call that sets out_size is missing from
         * this excerpt. */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);

        /* add the distortion reported by the trial encoder */
        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        /* NOTE(review): the context-freeing call is missing from this
         * excerpt. */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1523 
/**
 * Determines whether an input picture is discarded or not
 * and if not determines the length of the next chain of B frames
 * and moves these pictures (including the P frame) into
 * reordered_input_picture.
 * input_picture[0] is always NULL when exiting this function, even on error;
 * reordered_input_picture[0] is always NULL when exiting this function on error.
 *
 * NOTE(review): the signature line is missing from this excerpt —
 * presumably static, taking the MpegEncContext; confirm upstream.
 */
{
    /* Either nothing to do or can't do anything */
    if (s->reordered_input_picture[0] || !s->input_picture[0])
        return 0;

    /* set next picture type & ordering */
    if (s->frame_skip_threshold || s->frame_skip_factor) {
        if (s->picture_in_gop_number < s->gop_size &&
            s->next_pic.ptr &&
            skip_check(s, s->input_picture[0], s->next_pic.ptr)) {
            // FIXME check that the gop check above is +-1 correct
            ff_refstruct_unref(&s->input_picture[0]);

            ff_vbv_update(s, 0);

            return 0;
        }
    }

    if (/*s->picture_in_gop_number >= s->gop_size ||*/
        !s->next_pic.ptr || s->intra_only) {
        /* No reference yet (or intra-only): code as I frame. */
        s->reordered_input_picture[0] = s->input_picture[0];
        s->input_picture[0] = NULL;
        s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
    } else {
        int b_frames = 0;

        /* Two-pass mode: take the picture types recorded in the first
         * pass from the rate-control log. */
        if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
            for (int i = 0; i < s->max_b_frames + 1; i++) {
                int pict_num = s->input_picture[0]->display_picture_number + i;

                if (pict_num >= s->rc_context.num_entries)
                    break;
                if (!s->input_picture[i]) {
                    s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                    break;
                }

                s->input_picture[i]->f->pict_type =
                    s->rc_context.entry[pict_num].new_pict_type;
            }
        }

        if (s->b_frame_strategy == 0) {
            /* Fixed strategy: use as many B frames as are queued. */
            b_frames = s->max_b_frames;
            while (b_frames && !s->input_picture[b_frames])
                b_frames--;
        } else if (s->b_frame_strategy == 1) {
            /* Heuristic: score each candidate by its intra-block count
             * vs. the previous picture; stop the chain at the first
             * picture that looks too different. */
            int i;
            for (i = 1; i < s->max_b_frames + 1; i++) {
                if (s->input_picture[i] &&
                    s->input_picture[i]->b_frame_score == 0) {
                    /* NOTE(review): the line naming the scoring call is
                     * missing from this excerpt. */
                    s->input_picture[i]->b_frame_score =
                        s->input_picture[i ]->f->data[0],
                        s->input_picture[i - 1]->f->data[0],
                        s->linesize) + 1;
                }
            }
            for (i = 0; i < s->max_b_frames + 1; i++) {
                if (!s->input_picture[i] ||
                    s->input_picture[i]->b_frame_score - 1 >
                        s->mb_num / s->b_sensitivity)
                    break;
            }

            b_frames = FFMAX(0, i - 1);

            /* reset scores */
            for (i = 0; i < b_frames + 1; i++) {
                s->input_picture[i]->b_frame_score = 0;
            }
        } else if (s->b_frame_strategy == 2) {
            /* Exhaustive strategy: trial-encode downscaled pictures. */
            b_frames = estimate_best_b_count(s);
            if (b_frames < 0) {
                ff_refstruct_unref(&s->input_picture[0]);
                return b_frames;
            }
        }

        emms_c();

        /* Honour picture types forced by the user/first pass: any
         * non-B forced type terminates the B chain there. */
        for (int i = b_frames - 1; i >= 0; i--) {
            int type = s->input_picture[i]->f->pict_type;
            if (type && type != AV_PICTURE_TYPE_B)
                b_frames = i;
        }
        if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
            b_frames == s->max_b_frames) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "warning, too many B-frames in a row\n");
        }

        /* End of GOP handling: either clamp the chain (strict GOP) or
         * promote the reference to an I frame. */
        if (s->picture_in_gop_number + b_frames >= s->gop_size) {
            if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                s->gop_size > s->picture_in_gop_number) {
                b_frames = s->gop_size - s->picture_in_gop_number - 1;
            } else {
                if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                    b_frames = 0;
                s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
            }
        }

        if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
            s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
            b_frames--;

        /* Move the reference first, followed by the B frames, assigning
         * coded picture numbers in coding order. */
        s->reordered_input_picture[0] = s->input_picture[b_frames];
        s->input_picture[b_frames] = NULL;
        if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
        for (int i = 0; i < b_frames; i++) {
            s->reordered_input_picture[i + 1] = s->input_picture[i];
            s->input_picture[i] = NULL;
            /* NOTE(review): the picture-type value line is missing from
             * this excerpt. */
            s->reordered_input_picture[i + 1]->f->pict_type =
            s->reordered_input_picture[i + 1]->coded_picture_number =
                s->coded_picture_number++;
        }
    }

    return 0;
}
1661 
/* NOTE(review): the signature line and the call that sets `ret` (the
 * B-chain selection step) are missing from this excerpt. Judging from the
 * body, this pops the next picture to encode from the reordered queue and
 * prepares s->new_pic / s->cur_pic for it. */
{
    int ret;

    av_assert1(!s->reordered_input_picture[0]);

    /* Shift the reordered queue down by one. */
    for (int i = 1; i <= MAX_B_FRAMES; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_B_FRAMES] = NULL;

    av_assert1(!s->input_picture[0]);
    if (ret < 0)
        return ret;

    av_frame_unref(s->new_pic);

    if (s->reordered_input_picture[0]) {
        /* B frames are never used as references. */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_B;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable
            av_frame_move_ref(s->new_pic, s->reordered_input_picture[0]->f);

            ret = prepare_picture(s, s->reordered_input_picture[0]->f, s->new_pic);
            if (ret < 0)
                goto fail;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            ret = av_frame_ref(s->new_pic, s->reordered_input_picture[0]->f);
            if (ret < 0)
                goto fail;
            for (int i = 0; i < MPV_MAX_PLANES; i++) {
                if (s->new_pic->data[i])
                    s->new_pic->data[i] += INPLACE_OFFSET;
            }
        }
        /* Hand the picture over to cur_pic and allocate the per-picture
         * encoder bookkeeping (scratch buffers, tables). */
        s->cur_pic.ptr = s->reordered_input_picture[0];
        s->reordered_input_picture[0] = NULL;
        av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
        av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height);
        av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
        ret = ff_mpv_alloc_pic_accessories(s->avctx, &s->cur_pic,
                                           &s->sc, &s->buffer_pools, s->mb_height);
        if (ret < 0) {
            ff_mpv_unref_picture(&s->cur_pic);
            return ret;
        }
        s->picture_number = s->cur_pic.ptr->display_picture_number;

    }
    return 0;
fail:
    ff_refstruct_unref(&s->reordered_input_picture[0]);
    return ret;
}
1720 
/* NOTE(review): the signature line is missing from this excerpt; this
 * runs after a picture has been encoded: it pads the reconstructed
 * reference and updates the per-type lambda bookkeeping. */
{
    /* Pad the reconstructed picture so motion vectors may point outside
     * it (unrestricted MV); only needed for reference pictures. */
    if (s->unrestricted_mv &&
        s->cur_pic.reference &&
        !s->intra_only) {
        int hshift = s->chroma_x_shift;
        int vshift = s->chroma_y_shift;
        s->mpvencdsp.draw_edges(s->cur_pic.data[0],
                                s->cur_pic.linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): the edge-width argument
                                 * line is missing from this excerpt */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[1],
                                s->cur_pic.linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[2],
                                s->cur_pic.linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* Remember the type and lambda of this picture for rate control of
     * the next one. */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->cur_pic.ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1756 
/* NOTE(review): the signature line is missing from this excerpt; this
 * recomputes the per-coefficient noise-reduction DCT offsets from the
 * accumulated error statistics, separately for intra and inter blocks. */
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* Halve the accumulators once the sample count gets large so
         * the statistics keep adapting instead of saturating. */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            /* offset ~= noise_reduction * count / error_sum, with
             * rounding; the +1 in the divisor avoids division by zero */
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1777 
/* NOTE(review): the signature line is missing from this excerpt; this is
 * the per-picture setup run before encoding starts. */
{
    s->cur_pic.ptr->f->pict_type = s->pict_type;

    /* For reference pictures, rotate: last <- next <- current. */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->last_pic, &s->next_pic);
        ff_mpv_replace_picture(&s->next_pic, &s->cur_pic);
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): the call refreshing the noise-reduction offsets
         * is missing from this excerpt. */
    }
}
1792 
/* NOTE(review): the first signature line (function name and leading
 * parameters) and several statement lines are missing from this excerpt;
 * by the body this is the main per-frame encode entry point: it queues
 * the input, selects/encodes the next picture, handles VBV re-encoding,
 * stuffing, vbv_delay patching and packet metadata. */
                          const AVFrame *pic_arg, int *got_packet)
{
    int stuffing_count, ret;
    int context_count = s->slice_context_count;

    ff_mpv_unref_picture(&s->cur_pic);

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_pic->data[0]) {
        int growing_buffer = context_count == 1 && !s->data_partitioning;
        /* Worst-case packet size; with a growing buffer only a small
         * initial allocation is needed. */
        size_t pkt_size = 10000 + s->mb_width * s->mb_height *
                                  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
        if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
            ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
            if (ret < 0)
                return ret;
        }
        if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
            return ret;
        /* NOTE(review): a statement line is missing from this excerpt
         * here. */
        if (s->mb_info) {
            /* NOTE(review): the side-data type argument line is missing
             * from this excerpt. */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->mb_width*s->mb_height*12);
            if (!s->mb_info_ptr)
                return AVERROR(ENOMEM);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        s->pict_type = s->new_pic->pict_type;
        //emms_c();
        frame_start(s);
vbv_retry:
        ret = encode_picture(s, pkt);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): a statement line is missing from this
             * excerpt here (presumably updating pkt->size). */
        }
        if (ret < 0)
            return -1;

        frame_end(s);

        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if this frame overflows the buffer, raise lambda
         * and re-encode the same picture. */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0; // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    s->no_rounding ^= s->flipflop_rounding;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

        }


        for (int i = 0; i < MPV_MAX_PLANES; i++)
            avctx->error[i] += s->encoding_error[i];
        /* NOTE(review): one argument line of this call is missing from
         * this excerpt. */
        ff_side_data_set_encoder_stats(pkt, s->cur_pic.ptr->f->quality,
                                       s->encoding_error,
                                       s->pict_type);

        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* Pad the frame with stuffing bits if the rate control asks for
         * them (CBR). */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* NOTE(review): the case labels for this first branch are
             * missing from this excerpt. */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: a dedicated start code followed by
                 * 0xFF bytes. */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            /* NOTE(review): one condition line is missing from this
             * excerpt here. */
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            /* NOTE(review): the second line of this expression is
             * missing from this excerpt. */
            double inbits = avctx->rc_max_rate *
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_pos - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;
            uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;

            if (bits < 0)
                /* NOTE(review): the logging-call line is missing from
                 * this excerpt. */
                       "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* Patch the 16-bit vbv_delay field (split across three bytes
             * in the picture header) in the already-written bitstream. */
            vbv_delay_ptr[0] &= 0xF8;
            vbv_delay_ptr[0] |= vbv_delay >> 13;
            vbv_delay_ptr[1] = vbv_delay >> 5;
            vbv_delay_ptr[2] &= 0x07;
            vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): the side-data attach call line is missing
             * from this excerpt. */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }
        }
        s->total_bits += s->frame_bits;

        pkt->pts = s->cur_pic.ptr->f->pts;
        pkt->duration = s->cur_pic.ptr->f->duration;
        /* With B frames, dts lags pts by one reference picture. */
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->cur_pic.ptr->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;

        // the no-delay case is handled in generic code
        /* NOTE(review): the opening condition of this block is missing
         * from this excerpt. */
            ret = ff_encode_reordered_opaque(avctx, pkt, s->cur_pic.ptr->f);
            if (ret < 0)
                return ret;
        }

        if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
        /* NOTE(review): the statement lines of these two conditionals
         * are missing from this excerpt. */
        if (s->mb_info)
    } else {
        /* Nothing selected for output (e.g. still filling the B queue). */
        s->frame_bits = 0;
    }

    ff_mpv_unref_picture(&s->cur_pic);

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2021 
/* NOTE(review): the first signature line is missing from this excerpt;
 * judging from the body this zeroes out a block that contains only a few
 * isolated +-1 coefficients when coding them is not worth the bits. */
                                          int n, int threshold)
{
    /* Cost assigned to a +-1 level as a function of the length of the
     * zero run preceding it: short runs are expensive, long runs cheap. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    /* A negative threshold means the DC coefficient may be zeroed too. */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* Accumulate a cost over the +-1 levels; give up unchanged as soon
     * as any level with magnitude > 1 is found. */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* Cheap enough: clear every coefficient (except possibly DC). */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* last_index reflects whether a (DC) coefficient survived. */
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2077 
2078 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2079  int last_index)
2080 {
2081  int i;
2082  const int maxlevel = s->max_qcoeff;
2083  const int minlevel = s->min_qcoeff;
2084  int overflow = 0;
2085 
2086  if (s->mb_intra) {
2087  i = 1; // skip clipping of intra dc
2088  } else
2089  i = 0;
2090 
2091  for (; i <= last_index; i++) {
2092  const int j = s->intra_scantable.permutated[i];
2093  int level = block[j];
2094 
2095  if (level > maxlevel) {
2096  level = maxlevel;
2097  overflow++;
2098  } else if (level < minlevel) {
2099  level = minlevel;
2100  overflow++;
2101  }
2102 
2103  block[j] = level;
2104  }
2105 
2106  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2107  av_log(s->avctx, AV_LOG_INFO,
2108  "warning, clipping %d dct coefficients to %d..%d\n",
2109  overflow, minlevel, maxlevel);
2110 }
2111 
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the
 * local (up to 3x3) neighbourhood variance: flat areas get low weights,
 * textured areas high ones.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            /* neighbourhood bounds, clipped to the 8x8 block */
            const int ny_lo = FFMAX(row - 1, 0), ny_hi = FFMIN(8, row + 2);
            const int nx_lo = FFMAX(col - 1, 0), nx_hi = FFMIN(8, col + 2);
            int sum = 0, sqr = 0, count = 0;

            for (int ny = ny_lo; ny < ny_hi; ny++) {
                for (int nx = nx_lo; nx < nx_hi; nx++) {
                    const int v = ptr[nx + ny * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * variance) / count, in integer math */
            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2135 
2137  int motion_x, int motion_y,
2138  int mb_block_height,
2139  int mb_block_width,
2140  int mb_block_count,
2141  int chroma_x_shift,
2142  int chroma_y_shift,
2143  int chroma_format)
2144 {
2145 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2146  * and neither of these encoders currently supports 444. */
2147 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2148  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2149  int16_t weight[12][64];
2150  int16_t orig[12][64];
2151  const int mb_x = s->mb_x;
2152  const int mb_y = s->mb_y;
2153  int i;
2154  int skip_dct[12];
2155  int dct_offset = s->linesize * 8; // default for progressive frames
2156  int uv_dct_offset = s->uvlinesize * 8;
2157  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2158  ptrdiff_t wrap_y, wrap_c;
2159 
2160  for (i = 0; i < mb_block_count; i++)
2161  skip_dct[i] = s->skipdct;
2162 
2163  if (s->adaptive_quant) {
2164  const int last_qp = s->qscale;
2165  const int mb_xy = mb_x + mb_y * s->mb_stride;
2166 
2167  s->lambda = s->lambda_table[mb_xy];
2168  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2170 
2171  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2172  s->dquant = s->cur_pic.qscale_table[mb_xy] - last_qp;
2173 
2174  if (s->out_format == FMT_H263) {
2175  s->dquant = av_clip(s->dquant, -2, 2);
2176 
2177  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2178  if (!s->mb_intra) {
2179  if (s->pict_type == AV_PICTURE_TYPE_B) {
2180  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2181  s->dquant = 0;
2182  }
2183  if (s->mv_type == MV_TYPE_8X8)
2184  s->dquant = 0;
2185  }
2186  }
2187  }
2188  }
2189  ff_set_qscale(s, last_qp + s->dquant);
2190  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2191  ff_set_qscale(s, s->qscale + s->dquant);
2192 
2193  wrap_y = s->linesize;
2194  wrap_c = s->uvlinesize;
2195  ptr_y = s->new_pic->data[0] +
2196  (mb_y * 16 * wrap_y) + mb_x * 16;
2197  ptr_cb = s->new_pic->data[1] +
2198  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2199  ptr_cr = s->new_pic->data[2] +
2200  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2201 
2202  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2203  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2204  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2205  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2206  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2207  wrap_y, wrap_y,
2208  16, 16, mb_x * 16, mb_y * 16,
2209  s->width, s->height);
2210  ptr_y = ebuf;
2211  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2212  wrap_c, wrap_c,
2213  mb_block_width, mb_block_height,
2214  mb_x * mb_block_width, mb_y * mb_block_height,
2215  cw, ch);
2216  ptr_cb = ebuf + 16 * wrap_y;
2217  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2218  wrap_c, wrap_c,
2219  mb_block_width, mb_block_height,
2220  mb_x * mb_block_width, mb_y * mb_block_height,
2221  cw, ch);
2222  ptr_cr = ebuf + 16 * wrap_y + 16;
2223  }
2224 
2225  if (s->mb_intra) {
2226  if (INTERLACED_DCT(s)) {
2227  int progressive_score, interlaced_score;
2228 
2229  s->interlaced_dct = 0;
2230  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2231  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2232  NULL, wrap_y, 8) - 400;
2233 
2234  if (progressive_score > 0) {
2235  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2236  NULL, wrap_y * 2, 8) +
2237  s->ildct_cmp[1](s, ptr_y + wrap_y,
2238  NULL, wrap_y * 2, 8);
2239  if (progressive_score > interlaced_score) {
2240  s->interlaced_dct = 1;
2241 
2242  dct_offset = wrap_y;
2243  uv_dct_offset = wrap_c;
2244  wrap_y <<= 1;
2245  if (chroma_format == CHROMA_422 ||
2247  wrap_c <<= 1;
2248  }
2249  }
2250  }
2251 
2252  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2253  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2254  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2255  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2256 
2257  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2258  skip_dct[4] = 1;
2259  skip_dct[5] = 1;
2260  } else {
2261  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2262  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2263  if (chroma_format == CHROMA_422) {
2264  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2265  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2266  } else if (chroma_format == CHROMA_444) {
2267  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2268  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2269  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2270  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2271  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2272  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2273  }
2274  }
2275  } else {
2276  op_pixels_func (*op_pix)[4];
2277  qpel_mc_func (*op_qpix)[16];
2278  uint8_t *dest_y, *dest_cb, *dest_cr;
2279 
2280  dest_y = s->dest[0];
2281  dest_cb = s->dest[1];
2282  dest_cr = s->dest[2];
2283 
2284  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2285  op_pix = s->hdsp.put_pixels_tab;
2286  op_qpix = s->qdsp.put_qpel_pixels_tab;
2287  } else {
2288  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2289  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2290  }
2291 
2292  if (s->mv_dir & MV_DIR_FORWARD) {
2293  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2294  s->last_pic.data,
2295  op_pix, op_qpix);
2296  op_pix = s->hdsp.avg_pixels_tab;
2297  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2298  }
2299  if (s->mv_dir & MV_DIR_BACKWARD) {
2300  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2301  s->next_pic.data,
2302  op_pix, op_qpix);
2303  }
2304 
2305  if (INTERLACED_DCT(s)) {
2306  int progressive_score, interlaced_score;
2307 
2308  s->interlaced_dct = 0;
2309  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2310  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2311  ptr_y + wrap_y * 8,
2312  wrap_y, 8) - 400;
2313 
2314  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2315  progressive_score -= 400;
2316 
2317  if (progressive_score > 0) {
2318  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2319  wrap_y * 2, 8) +
2320  s->ildct_cmp[0](s, dest_y + wrap_y,
2321  ptr_y + wrap_y,
2322  wrap_y * 2, 8);
2323 
2324  if (progressive_score > interlaced_score) {
2325  s->interlaced_dct = 1;
2326 
2327  dct_offset = wrap_y;
2328  uv_dct_offset = wrap_c;
2329  wrap_y <<= 1;
2330  if (chroma_format == CHROMA_422)
2331  wrap_c <<= 1;
2332  }
2333  }
2334  }
2335 
2336  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2337  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2338  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2339  dest_y + dct_offset, wrap_y);
2340  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2341  dest_y + dct_offset + 8, wrap_y);
2342 
2343  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2344  skip_dct[4] = 1;
2345  skip_dct[5] = 1;
2346  } else {
2347  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2348  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2349  if (!chroma_y_shift) { /* 422 */
2350  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2351  dest_cb + uv_dct_offset, wrap_c);
2352  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2353  dest_cr + uv_dct_offset, wrap_c);
2354  }
2355  }
2356  /* pre quantization */
2357  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2358  // FIXME optimize
2359  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2360  skip_dct[0] = 1;
2361  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2362  skip_dct[1] = 1;
2363  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2364  wrap_y, 8) < 20 * s->qscale)
2365  skip_dct[2] = 1;
2366  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2367  wrap_y, 8) < 20 * s->qscale)
2368  skip_dct[3] = 1;
2369  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2370  skip_dct[4] = 1;
2371  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2372  skip_dct[5] = 1;
2373  if (!chroma_y_shift) { /* 422 */
2374  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2375  dest_cb + uv_dct_offset,
2376  wrap_c, 8) < 20 * s->qscale)
2377  skip_dct[6] = 1;
2378  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2379  dest_cr + uv_dct_offset,
2380  wrap_c, 8) < 20 * s->qscale)
2381  skip_dct[7] = 1;
2382  }
2383  }
2384  }
2385 
2386  if (s->quantizer_noise_shaping) {
2387  if (!skip_dct[0])
2388  get_visual_weight(weight[0], ptr_y , wrap_y);
2389  if (!skip_dct[1])
2390  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2391  if (!skip_dct[2])
2392  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2393  if (!skip_dct[3])
2394  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2395  if (!skip_dct[4])
2396  get_visual_weight(weight[4], ptr_cb , wrap_c);
2397  if (!skip_dct[5])
2398  get_visual_weight(weight[5], ptr_cr , wrap_c);
2399  if (!chroma_y_shift) { /* 422 */
2400  if (!skip_dct[6])
2401  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2402  wrap_c);
2403  if (!skip_dct[7])
2404  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2405  wrap_c);
2406  }
2407  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2408  }
2409 
2410  /* DCT & quantize */
2411  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2412  {
2413  for (i = 0; i < mb_block_count; i++) {
2414  if (!skip_dct[i]) {
2415  int overflow;
2416  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2417  // FIXME we could decide to change to quantizer instead of
2418  // clipping
2419  // JS: I don't think that would be a good idea it could lower
2420  // quality instead of improve it. Just INTRADC clipping
2421  // deserves changes in quantizer
2422  if (overflow)
2423  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2424  } else
2425  s->block_last_index[i] = -1;
2426  }
2427  if (s->quantizer_noise_shaping) {
2428  for (i = 0; i < mb_block_count; i++) {
2429  if (!skip_dct[i]) {
2430  s->block_last_index[i] =
2431  dct_quantize_refine(s, s->block[i], weight[i],
2432  orig[i], i, s->qscale);
2433  }
2434  }
2435  }
2436 
2437  if (s->luma_elim_threshold && !s->mb_intra)
2438  for (i = 0; i < 4; i++)
2439  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2440  if (s->chroma_elim_threshold && !s->mb_intra)
2441  for (i = 4; i < mb_block_count; i++)
2442  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2443 
2444  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2445  for (i = 0; i < mb_block_count; i++) {
2446  if (s->block_last_index[i] == -1)
2447  s->coded_score[i] = INT_MAX / 256;
2448  }
2449  }
2450  }
2451 
2452  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2453  s->block_last_index[4] =
2454  s->block_last_index[5] = 0;
2455  s->block[4][0] =
2456  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2457  if (!chroma_y_shift) { /* 422 / 444 */
2458  for (i=6; i<12; i++) {
2459  s->block_last_index[i] = 0;
2460  s->block[i][0] = s->block[4][0];
2461  }
2462  }
2463  }
2464 
2465  // non c quantize code returns incorrect block_last_index FIXME
2466  if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
2467  for (i = 0; i < mb_block_count; i++) {
2468  int j;
2469  if (s->block_last_index[i] > 0) {
2470  for (j = 63; j > 0; j--) {
2471  if (s->block[i][s->intra_scantable.permutated[j]])
2472  break;
2473  }
2474  s->block_last_index[i] = j;
2475  }
2476  }
2477  }
2478 
2479  /* huffman encode */
2480  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2483  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2484  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2485  break;
2486  case AV_CODEC_ID_MPEG4:
2487  if (CONFIG_MPEG4_ENCODER)
2488  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2489  break;
2490  case AV_CODEC_ID_MSMPEG4V2:
2491  case AV_CODEC_ID_MSMPEG4V3:
2492  case AV_CODEC_ID_WMV1:
2493  if (CONFIG_MSMPEG4ENC)
2494  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2495  break;
2496  case AV_CODEC_ID_WMV2:
2497  if (CONFIG_WMV2_ENCODER)
2498  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2499  break;
2500  case AV_CODEC_ID_H261:
2501  if (CONFIG_H261_ENCODER)
2502  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2503  break;
2504  case AV_CODEC_ID_H263:
2505  case AV_CODEC_ID_H263P:
2506  case AV_CODEC_ID_FLV1:
2507  case AV_CODEC_ID_RV10:
2508  case AV_CODEC_ID_RV20:
2509  if (CONFIG_H263_ENCODER)
2510  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2511  break;
2512 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2513  case AV_CODEC_ID_MJPEG:
2514  case AV_CODEC_ID_AMV:
2515  ff_mjpeg_encode_mb(s, s->block);
2516  break;
2517 #endif
2518  case AV_CODEC_ID_SPEEDHQ:
2519  if (CONFIG_SPEEDHQ_ENCODER)
2520  ff_speedhq_encode_mb(s, s->block);
2521  break;
2522  default:
2523  av_assert1(0);
2524  }
2525 }
2526 
2527 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2528 {
2529  if (s->chroma_format == CHROMA_420)
2530  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2531  else if (s->chroma_format == CHROMA_422)
2532  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2533  else
2534  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2535 }
2536 
2538  const MpegEncContext *s)
2539 {
2540  int i;
2541 
2542  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2543 
2544  /* MPEG-1 */
2545  d->mb_skip_run= s->mb_skip_run;
2546  for(i=0; i<3; i++)
2547  d->last_dc[i] = s->last_dc[i];
2548 
2549  /* statistics */
2550  d->mv_bits= s->mv_bits;
2551  d->i_tex_bits= s->i_tex_bits;
2552  d->p_tex_bits= s->p_tex_bits;
2553  d->i_count= s->i_count;
2554  d->misc_bits= s->misc_bits;
2555  d->last_bits= 0;
2556 
2557  d->mb_skipped= 0;
2558  d->qscale= s->qscale;
2559  d->dquant= s->dquant;
2560 
2561  d->esc3_level_length= s->esc3_level_length;
2562 }
2563 
2565  const MpegEncContext *s)
2566 {
2567  int i;
2568 
2569  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2570  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2571 
2572  /* MPEG-1 */
2573  d->mb_skip_run= s->mb_skip_run;
2574  for(i=0; i<3; i++)
2575  d->last_dc[i] = s->last_dc[i];
2576 
2577  /* statistics */
2578  d->mv_bits= s->mv_bits;
2579  d->i_tex_bits= s->i_tex_bits;
2580  d->p_tex_bits= s->p_tex_bits;
2581  d->i_count= s->i_count;
2582  d->misc_bits= s->misc_bits;
2583 
2584  d->mb_intra= s->mb_intra;
2585  d->mb_skipped= s->mb_skipped;
2586  d->mv_type= s->mv_type;
2587  d->mv_dir= s->mv_dir;
2588  d->pb= s->pb;
2589  if(s->data_partitioning){
2590  d->pb2= s->pb2;
2591  d->tex_pb= s->tex_pb;
2592  }
2593  d->block= s->block;
2594  for(i=0; i<8; i++)
2595  d->block_last_index[i]= s->block_last_index[i];
2596  d->interlaced_dct= s->interlaced_dct;
2597  d->qscale= s->qscale;
2598 
2599  d->esc3_level_length= s->esc3_level_length;
2600 }
2601 
2604  int *dmin, int *next_block, int motion_x, int motion_y)
2605 {
2606  int score;
2607  uint8_t *dest_backup[3];
2608 
2609  copy_context_before_encode(s, backup);
2610 
2611  s->block= s->blocks[*next_block];
2612  s->pb= pb[*next_block];
2613  if(s->data_partitioning){
2614  s->pb2 = pb2 [*next_block];
2615  s->tex_pb= tex_pb[*next_block];
2616  }
2617 
2618  if(*next_block){
2619  memcpy(dest_backup, s->dest, sizeof(s->dest));
2620  s->dest[0] = s->sc.rd_scratchpad;
2621  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2622  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2623  av_assert0(s->linesize >= 32); //FIXME
2624  }
2625 
2626  encode_mb(s, motion_x, motion_y);
2627 
2628  score= put_bits_count(&s->pb);
2629  if(s->data_partitioning){
2630  score+= put_bits_count(&s->pb2);
2631  score+= put_bits_count(&s->tex_pb);
2632  }
2633 
2634  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2635  mpv_reconstruct_mb(s, s->block);
2636 
2637  score *= s->lambda2;
2638  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2639  }
2640 
2641  if(*next_block){
2642  memcpy(s->dest, dest_backup, sizeof(s->dest));
2643  }
2644 
2645  if(score<*dmin){
2646  *dmin= score;
2647  *next_block^=1;
2648 
2650  }
2651 }
2652 
2653 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2654  const uint32_t *sq = ff_square_tab + 256;
2655  int acc=0;
2656  int x,y;
2657 
2658  if(w==16 && h==16)
2659  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2660  else if(w==8 && h==8)
2661  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2662 
2663  for(y=0; y<h; y++){
2664  for(x=0; x<w; x++){
2665  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2666  }
2667  }
2668 
2669  av_assert2(acc>=0);
2670 
2671  return acc;
2672 }
2673 
2674 static int sse_mb(MpegEncContext *s){
2675  int w= 16;
2676  int h= 16;
2677  int chroma_mb_w = w >> s->chroma_x_shift;
2678  int chroma_mb_h = h >> s->chroma_y_shift;
2679 
2680  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2681  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2682 
2683  if(w==16 && h==16)
2684  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2685  s->dest[0], s->linesize, 16) +
2686  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2687  s->dest[1], s->uvlinesize, chroma_mb_h) +
2688  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2689  s->dest[2], s->uvlinesize, chroma_mb_h);
2690  else
2691  return sse(s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2692  s->dest[0], w, h, s->linesize) +
2693  sse(s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2694  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2695  sse(s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2696  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2697 }
2698 
2700  MpegEncContext *s= *(void**)arg;
2701 
2702 
2703  s->me.pre_pass=1;
2704  s->me.dia_size= s->avctx->pre_dia_size;
2705  s->first_slice_line=1;
2706  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2707  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2708  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2709  }
2710  s->first_slice_line=0;
2711  }
2712 
2713  s->me.pre_pass=0;
2714 
2715  return 0;
2716 }
2717 
2719  MpegEncContext *s= *(void**)arg;
2720 
2721  s->me.dia_size= s->avctx->dia_size;
2722  s->first_slice_line=1;
2723  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2724  s->mb_x=0; //for block init below
2726  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2727  s->block_index[0]+=2;
2728  s->block_index[1]+=2;
2729  s->block_index[2]+=2;
2730  s->block_index[3]+=2;
2731 
2732  /* compute motion vector & mb_type and store in context */
2733  if(s->pict_type==AV_PICTURE_TYPE_B)
2734  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2735  else
2736  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2737  }
2738  s->first_slice_line=0;
2739  }
2740  return 0;
2741 }
2742 
2743 static int mb_var_thread(AVCodecContext *c, void *arg){
2744  MpegEncContext *s= *(void**)arg;
2745  int mb_x, mb_y;
2746 
2747  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2748  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2749  int xx = mb_x * 16;
2750  int yy = mb_y * 16;
2751  const uint8_t *pix = s->new_pic->data[0] + (yy * s->linesize) + xx;
2752  int varc;
2753  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2754 
2755  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2756  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2757 
2758  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2759  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2760  s->me.mb_var_sum_temp += varc;
2761  }
2762  }
2763  return 0;
2764 }
2765 
2767  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2768  if(s->partitioned_frame){
2770  }
2771 
2772  ff_mpeg4_stuffing(&s->pb);
2773  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2774  s->out_format == FMT_MJPEG) {
2776  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2778  }
2779 
2780  flush_put_bits(&s->pb);
2781 
2782  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2783  s->misc_bits+= get_bits_diff(s);
2784 }
2785 
2787 {
2788  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2789  int offset = put_bits_count(&s->pb);
2790  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2791  int gobn = s->mb_y / s->gob_index;
2792  int pred_x, pred_y;
2793  if (CONFIG_H263_ENCODER)
2794  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2795  bytestream_put_le32(&ptr, offset);
2796  bytestream_put_byte(&ptr, s->qscale);
2797  bytestream_put_byte(&ptr, gobn);
2798  bytestream_put_le16(&ptr, mba);
2799  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2800  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2801  /* 4MV not implemented */
2802  bytestream_put_byte(&ptr, 0); /* hmv2 */
2803  bytestream_put_byte(&ptr, 0); /* vmv2 */
2804 }
2805 
2806 static void update_mb_info(MpegEncContext *s, int startcode)
2807 {
2808  if (!s->mb_info)
2809  return;
2810  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2811  s->mb_info_size += 12;
2812  s->prev_mb_info = s->last_mb_info;
2813  }
2814  if (startcode) {
2815  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2816  /* This might have incremented mb_info_size above, and we return without
2817  * actually writing any info into that slot yet. But in that case,
2818  * this will be called again at the start of the after writing the
2819  * start code, actually writing the mb info. */
2820  return;
2821  }
2822 
2823  s->last_mb_info = put_bytes_count(&s->pb, 0);
2824  if (!s->mb_info_size)
2825  s->mb_info_size += 12;
2826  write_mb_info(s);
2827 }
2828 
2829 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2830 {
2831  if (put_bytes_left(&s->pb, 0) < threshold
2832  && s->slice_context_count == 1
2833  && s->pb.buf == s->avctx->internal->byte_buffer) {
2834  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2835 
2836  uint8_t *new_buffer = NULL;
2837  int new_buffer_size = 0;
2838 
2839  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2840  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2841  return AVERROR(ENOMEM);
2842  }
2843 
2844  emms_c();
2845 
2846  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2847  s->avctx->internal->byte_buffer_size + size_increase);
2848  if (!new_buffer)
2849  return AVERROR(ENOMEM);
2850 
2851  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2852  av_free(s->avctx->internal->byte_buffer);
2853  s->avctx->internal->byte_buffer = new_buffer;
2854  s->avctx->internal->byte_buffer_size = new_buffer_size;
2855  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2856  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2857  }
2858  if (put_bytes_left(&s->pb, 0) < threshold)
2859  return AVERROR(EINVAL);
2860  return 0;
2861 }
2862 
2863 static int encode_thread(AVCodecContext *c, void *arg){
2864  MpegEncContext *s= *(void**)arg;
2865  int mb_x, mb_y, mb_y_order;
2866  int chr_h= 16>>s->chroma_y_shift;
2867  int i, j;
2868  MpegEncContext best_s = { 0 }, backup_s;
2869  uint8_t bit_buf[2][MAX_MB_BYTES];
2870  uint8_t bit_buf2[2][MAX_MB_BYTES];
2871  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2872  PutBitContext pb[2], pb2[2], tex_pb[2];
2873 
2874  for(i=0; i<2; i++){
2875  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2876  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2877  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2878  }
2879 
2880  s->last_bits= put_bits_count(&s->pb);
2881  s->mv_bits=0;
2882  s->misc_bits=0;
2883  s->i_tex_bits=0;
2884  s->p_tex_bits=0;
2885  s->i_count=0;
2886 
2887  for(i=0; i<3; i++){
2888  /* init last dc values */
2889  /* note: quant matrix value (8) is implied here */
2890  s->last_dc[i] = 128 << s->intra_dc_precision;
2891 
2892  s->encoding_error[i] = 0;
2893  }
2894  if(s->codec_id==AV_CODEC_ID_AMV){
2895  s->last_dc[0] = 128*8/13;
2896  s->last_dc[1] = 128*8/14;
2897  s->last_dc[2] = 128*8/14;
2898  }
2899  s->mb_skip_run = 0;
2900  memset(s->last_mv, 0, sizeof(s->last_mv));
2901 
2902  s->last_mv_dir = 0;
2903 
2904  switch(s->codec_id){
2905  case AV_CODEC_ID_H263:
2906  case AV_CODEC_ID_H263P:
2907  case AV_CODEC_ID_FLV1:
2908  if (CONFIG_H263_ENCODER)
2909  s->gob_index = H263_GOB_HEIGHT(s->height);
2910  break;
2911  case AV_CODEC_ID_MPEG4:
2912  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2914  break;
2915  }
2916 
2917  s->resync_mb_x=0;
2918  s->resync_mb_y=0;
2919  s->first_slice_line = 1;
2920  s->ptr_lastgob = s->pb.buf;
2921  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2922  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2923  int first_in_slice;
2924  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2925  if (first_in_slice && mb_y_order != s->start_mb_y)
2927  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
2928  } else {
2929  mb_y = mb_y_order;
2930  }
2931  s->mb_x=0;
2932  s->mb_y= mb_y;
2933 
2934  ff_set_qscale(s, s->qscale);
2936 
2937  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2938  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2939  int mb_type= s->mb_type[xy];
2940 // int d;
2941  int dmin= INT_MAX;
2942  int dir;
2943  int size_increase = s->avctx->internal->byte_buffer_size/4
2944  + s->mb_width*MAX_MB_BYTES;
2945 
2947  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
2948  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2949  return -1;
2950  }
2951  if(s->data_partitioning){
2952  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
2953  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
2954  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2955  return -1;
2956  }
2957  }
2958 
2959  s->mb_x = mb_x;
2960  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2961  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
2962 
2963  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2965  xy= s->mb_y*s->mb_stride + s->mb_x;
2966  mb_type= s->mb_type[xy];
2967  }
2968 
2969  /* write gob / video packet header */
2970  if(s->rtp_mode){
2971  int current_packet_size, is_gob_start;
2972 
2973  current_packet_size = put_bytes_count(&s->pb, 1)
2974  - (s->ptr_lastgob - s->pb.buf);
2975 
2976  is_gob_start = s->rtp_payload_size &&
2977  current_packet_size >= s->rtp_payload_size &&
2978  mb_y + mb_x > 0;
2979 
2980  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2981 
2982  switch(s->codec_id){
2983  case AV_CODEC_ID_H263:
2984  case AV_CODEC_ID_H263P:
2985  if(!s->h263_slice_structured)
2986  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2987  break;
2989  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2991  if(s->mb_skip_run) is_gob_start=0;
2992  break;
2993  case AV_CODEC_ID_MJPEG:
2994  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2995  break;
2996  }
2997 
2998  if(is_gob_start){
2999  if(s->start_mb_y != mb_y || mb_x!=0){
3000  write_slice_end(s);
3001 
3002  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3004  }
3005  }
3006 
3007  av_assert2((put_bits_count(&s->pb)&7) == 0);
3008  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3009 
3010  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3011  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3012  int d = 100 / s->error_rate;
3013  if(r % d == 0){
3014  current_packet_size=0;
3015  s->pb.buf_ptr= s->ptr_lastgob;
3016  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3017  }
3018  }
3019 
3020  switch(s->codec_id){
3021  case AV_CODEC_ID_MPEG4:
3022  if (CONFIG_MPEG4_ENCODER) {
3025  }
3026  break;
3029  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3032  }
3033  break;
3034  case AV_CODEC_ID_H263:
3035  case AV_CODEC_ID_H263P:
3036  if (CONFIG_H263_ENCODER) {
3037  update_mb_info(s, 1);
3039  }
3040  break;
3041  }
3042 
3043  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3044  int bits= put_bits_count(&s->pb);
3045  s->misc_bits+= bits - s->last_bits;
3046  s->last_bits= bits;
3047  }
3048 
3049  s->ptr_lastgob += current_packet_size;
3050  s->first_slice_line=1;
3051  s->resync_mb_x=mb_x;
3052  s->resync_mb_y=mb_y;
3053  }
3054  }
3055 
3056  if( (s->resync_mb_x == s->mb_x)
3057  && s->resync_mb_y+1 == s->mb_y){
3058  s->first_slice_line=0;
3059  }
3060 
3061  s->mb_skipped=0;
3062  s->dquant=0; //only for QP_RD
3063 
3064  update_mb_info(s, 0);
3065 
3066  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3067  int next_block=0;
3068  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3069 
3070  copy_context_before_encode(&backup_s, s);
3071  backup_s.pb= s->pb;
3072  best_s.data_partitioning= s->data_partitioning;
3073  best_s.partitioned_frame= s->partitioned_frame;
3074  if(s->data_partitioning){
3075  backup_s.pb2= s->pb2;
3076  backup_s.tex_pb= s->tex_pb;
3077  }
3078 
3080  s->mv_dir = MV_DIR_FORWARD;
3081  s->mv_type = MV_TYPE_16X16;
3082  s->mb_intra= 0;
3083  s->mv[0][0][0] = s->p_mv_table[xy][0];
3084  s->mv[0][0][1] = s->p_mv_table[xy][1];
3085  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3086  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3087  }
3089  s->mv_dir = MV_DIR_FORWARD;
3090  s->mv_type = MV_TYPE_FIELD;
3091  s->mb_intra= 0;
3092  for(i=0; i<2; i++){
3093  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3094  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3095  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3096  }
3097  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3098  &dmin, &next_block, 0, 0);
3099  }
3101  s->mv_dir = MV_DIR_FORWARD;
3102  s->mv_type = MV_TYPE_16X16;
3103  s->mb_intra= 0;
3104  s->mv[0][0][0] = 0;
3105  s->mv[0][0][1] = 0;
3106  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3107  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3108  }
3110  s->mv_dir = MV_DIR_FORWARD;
3111  s->mv_type = MV_TYPE_8X8;
3112  s->mb_intra= 0;
3113  for(i=0; i<4; i++){
3114  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3115  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3116  }
3117  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3118  &dmin, &next_block, 0, 0);
3119  }
3121  s->mv_dir = MV_DIR_FORWARD;
3122  s->mv_type = MV_TYPE_16X16;
3123  s->mb_intra= 0;
3124  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3125  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3126  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3127  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3128  }
3130  s->mv_dir = MV_DIR_BACKWARD;
3131  s->mv_type = MV_TYPE_16X16;
3132  s->mb_intra= 0;
3133  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3134  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3135  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3136  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3137  }
3139  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3140  s->mv_type = MV_TYPE_16X16;
3141  s->mb_intra= 0;
3142  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3143  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3144  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3145  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3146  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3147  &dmin, &next_block, 0, 0);
3148  }
3150  s->mv_dir = MV_DIR_FORWARD;
3151  s->mv_type = MV_TYPE_FIELD;
3152  s->mb_intra= 0;
3153  for(i=0; i<2; i++){
3154  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3155  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3156  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3157  }
3158  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3159  &dmin, &next_block, 0, 0);
3160  }
3162  s->mv_dir = MV_DIR_BACKWARD;
3163  s->mv_type = MV_TYPE_FIELD;
3164  s->mb_intra= 0;
3165  for(i=0; i<2; i++){
3166  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3167  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3168  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3169  }
3170  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3171  &dmin, &next_block, 0, 0);
3172  }
3174  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3175  s->mv_type = MV_TYPE_FIELD;
3176  s->mb_intra= 0;
3177  for(dir=0; dir<2; dir++){
3178  for(i=0; i<2; i++){
3179  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3180  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3181  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3182  }
3183  }
3184  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3185  &dmin, &next_block, 0, 0);
3186  }
3188  s->mv_dir = 0;
3189  s->mv_type = MV_TYPE_16X16;
3190  s->mb_intra= 1;
3191  s->mv[0][0][0] = 0;
3192  s->mv[0][0][1] = 0;
3193  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3194  &dmin, &next_block, 0, 0);
3195  if(s->h263_pred || s->h263_aic){
3196  if(best_s.mb_intra)
3197  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3198  else
3199  ff_clean_intra_table_entries(s); //old mode?
3200  }
3201  }
3202 
3203  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3204  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3205  const int last_qp= backup_s.qscale;
3206  int qpi, qp, dc[6];
3207  int16_t ac[6][16];
3208  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3209  static const int dquant_tab[4]={-1,1,-2,2};
3210  int storecoefs = s->mb_intra && s->dc_val[0];
3211 
3212  av_assert2(backup_s.dquant == 0);
3213 
3214  //FIXME intra
3215  s->mv_dir= best_s.mv_dir;
3216  s->mv_type = MV_TYPE_16X16;
3217  s->mb_intra= best_s.mb_intra;
3218  s->mv[0][0][0] = best_s.mv[0][0][0];
3219  s->mv[0][0][1] = best_s.mv[0][0][1];
3220  s->mv[1][0][0] = best_s.mv[1][0][0];
3221  s->mv[1][0][1] = best_s.mv[1][0][1];
3222 
3223  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3224  for(; qpi<4; qpi++){
3225  int dquant= dquant_tab[qpi];
3226  qp= last_qp + dquant;
3227  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3228  continue;
3229  backup_s.dquant= dquant;
3230  if(storecoefs){
3231  for(i=0; i<6; i++){
3232  dc[i]= s->dc_val[0][ s->block_index[i] ];
3233  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3234  }
3235  }
3236 
3237  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3238  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3239  if(best_s.qscale != qp){
3240  if(storecoefs){
3241  for(i=0; i<6; i++){
3242  s->dc_val[0][ s->block_index[i] ]= dc[i];
3243  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3244  }
3245  }
3246  }
3247  }
3248  }
3249  }
3250  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3251  int mx= s->b_direct_mv_table[xy][0];
3252  int my= s->b_direct_mv_table[xy][1];
3253 
3254  backup_s.dquant = 0;
3255  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3256  s->mb_intra= 0;
3258  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3259  &dmin, &next_block, mx, my);
3260  }
3261  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3262  backup_s.dquant = 0;
3263  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3264  s->mb_intra= 0;
3265  ff_mpeg4_set_direct_mv(s, 0, 0);
3266  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3267  &dmin, &next_block, 0, 0);
3268  }
3269  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3270  int coded=0;
3271  for(i=0; i<6; i++)
3272  coded |= s->block_last_index[i];
3273  if(coded){
3274  int mx,my;
3275  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3276  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3277  mx=my=0; //FIXME find the one we actually used
3279  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3280  mx= s->mv[1][0][0];
3281  my= s->mv[1][0][1];
3282  }else{
3283  mx= s->mv[0][0][0];
3284  my= s->mv[0][0][1];
3285  }
3286 
3287  s->mv_dir= best_s.mv_dir;
3288  s->mv_type = best_s.mv_type;
3289  s->mb_intra= 0;
3290 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3291  s->mv[0][0][1] = best_s.mv[0][0][1];
3292  s->mv[1][0][0] = best_s.mv[1][0][0];
3293  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3294  backup_s.dquant= 0;
3295  s->skipdct=1;
3296  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3297  &dmin, &next_block, mx, my);
3298  s->skipdct=0;
3299  }
3300  }
3301 
3302  s->cur_pic.qscale_table[xy] = best_s.qscale;
3303 
3304  copy_context_after_encode(s, &best_s);
3305 
3306  pb_bits_count= put_bits_count(&s->pb);
3307  flush_put_bits(&s->pb);
3308  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3309  s->pb= backup_s.pb;
3310 
3311  if(s->data_partitioning){
3312  pb2_bits_count= put_bits_count(&s->pb2);
3313  flush_put_bits(&s->pb2);
3314  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3315  s->pb2= backup_s.pb2;
3316 
3317  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3318  flush_put_bits(&s->tex_pb);
3319  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3320  s->tex_pb= backup_s.tex_pb;
3321  }
3322  s->last_bits= put_bits_count(&s->pb);
3323 
3324  if (CONFIG_H263_ENCODER &&
3325  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3327 
3328  if(next_block==0){ //FIXME 16 vs linesize16
3329  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3330  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3331  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3332  }
3333 
3334  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3335  mpv_reconstruct_mb(s, s->block);
3336  } else {
3337  int motion_x = 0, motion_y = 0;
3338  s->mv_type=MV_TYPE_16X16;
3339  // only one MB-Type possible
3340 
3341  switch(mb_type){
3343  s->mv_dir = 0;
3344  s->mb_intra= 1;
3345  motion_x= s->mv[0][0][0] = 0;
3346  motion_y= s->mv[0][0][1] = 0;
3347  break;
3349  s->mv_dir = MV_DIR_FORWARD;
3350  s->mb_intra= 0;
3351  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3352  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3353  break;
3355  s->mv_dir = MV_DIR_FORWARD;
3356  s->mv_type = MV_TYPE_FIELD;
3357  s->mb_intra= 0;
3358  for(i=0; i<2; i++){
3359  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3360  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3361  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3362  }
3363  break;
3365  s->mv_dir = MV_DIR_FORWARD;
3366  s->mv_type = MV_TYPE_8X8;
3367  s->mb_intra= 0;
3368  for(i=0; i<4; i++){
3369  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3370  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3371  }
3372  break;
3374  if (CONFIG_MPEG4_ENCODER) {
3376  s->mb_intra= 0;
3377  motion_x=s->b_direct_mv_table[xy][0];
3378  motion_y=s->b_direct_mv_table[xy][1];
3379  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3380  }
3381  break;
3383  if (CONFIG_MPEG4_ENCODER) {
3385  s->mb_intra= 0;
3386  ff_mpeg4_set_direct_mv(s, 0, 0);
3387  }
3388  break;
3390  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3391  s->mb_intra= 0;
3392  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3393  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3394  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3395  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3396  break;
3398  s->mv_dir = MV_DIR_BACKWARD;
3399  s->mb_intra= 0;
3400  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3401  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3402  break;
3404  s->mv_dir = MV_DIR_FORWARD;
3405  s->mb_intra= 0;
3406  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3407  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3408  break;
3410  s->mv_dir = MV_DIR_FORWARD;
3411  s->mv_type = MV_TYPE_FIELD;
3412  s->mb_intra= 0;
3413  for(i=0; i<2; i++){
3414  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3415  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3416  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3417  }
3418  break;
3420  s->mv_dir = MV_DIR_BACKWARD;
3421  s->mv_type = MV_TYPE_FIELD;
3422  s->mb_intra= 0;
3423  for(i=0; i<2; i++){
3424  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3425  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3426  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3427  }
3428  break;
3430  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3431  s->mv_type = MV_TYPE_FIELD;
3432  s->mb_intra= 0;
3433  for(dir=0; dir<2; dir++){
3434  for(i=0; i<2; i++){
3435  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3436  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3437  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3438  }
3439  }
3440  break;
3441  default:
3442  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3443  }
3444 
3445  encode_mb(s, motion_x, motion_y);
3446 
3447  // RAL: Update last macroblock type
3448  s->last_mv_dir = s->mv_dir;
3449 
3450  if (CONFIG_H263_ENCODER &&
3451  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3453 
3454  mpv_reconstruct_mb(s, s->block);
3455  }
3456 
3457  /* clean the MV table in IPS frames for direct mode in B-frames */
3458  if(s->mb_intra /* && I,P,S_TYPE */){
3459  s->p_mv_table[xy][0]=0;
3460  s->p_mv_table[xy][1]=0;
3461  }
3462 
3463  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3464  int w= 16;
3465  int h= 16;
3466 
3467  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3468  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3469 
3470  s->encoding_error[0] += sse(
3471  s, s->new_pic->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3472  s->dest[0], w, h, s->linesize);
3473  s->encoding_error[1] += sse(
3474  s, s->new_pic->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3475  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3476  s->encoding_error[2] += sse(
3477  s, s->new_pic->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3478  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3479  }
3480  if(s->loop_filter){
3481  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3483  }
3484  ff_dlog(s->avctx, "MB %d %d bits\n",
3485  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3486  }
3487  }
3488 
3489 #if CONFIG_MSMPEG4ENC
3490  //not beautiful here but we must write it before flushing so it has to be here
3491  if (s->msmpeg4_version != MSMP4_UNUSED && s->msmpeg4_version < MSMP4_WMV1 &&
3492  s->pict_type == AV_PICTURE_TYPE_I)
3494 #endif
3495 
3496  write_slice_end(s);
3497 
3498  return 0;
3499 }
3500 
/* MERGE(): accumulate src->field into dst->field and zero the source so a
 * second merge cannot double-count.  Beware: the expansion is two separate
 * statements, so it must never be used as an unbraced if/else body. */
3501 #define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the signature line of this function is missing from this
 * extract; it is invoked below as merge_context_after_me(s,
 * s->thread_context[i]), i.e. dst is the main context and src a per-slice
 * thread context.  It folds the per-slice motion-estimation statistics
 * into the main context after the ME pass. */
3503  MERGE(me.scene_change_score);
3504  MERGE(me.mc_mb_var_sum_temp);
3505  MERGE(me.mb_var_sum_temp);
3506 }
3507 
/* NOTE(review): the signature line of this function is missing from this
 * extract; it is invoked below as merge_context_after_encode(s,
 * s->thread_context[i]) after the per-slice encode pass.  It folds the
 * slice's bit/statistics counters into the main context and appends the
 * slice's bitstream to the main bitstream. */
3509  int i;
3510 
3511  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3512  MERGE(dct_count[1]);
3513  MERGE(mv_bits);
3514  MERGE(i_tex_bits);
3515  MERGE(p_tex_bits);
3516  MERGE(i_count);
3517  MERGE(misc_bits);
3518  MERGE(encoding_error[0]);
3519  MERGE(encoding_error[1]);
3520  MERGE(encoding_error[2]);
3521 
 /* Noise-reduction error sums (intra and inter) are also per-slice state
  * and must be merged when noise reduction is enabled. */
3522  if (dst->noise_reduction){
3523  for(i=0; i<64; i++){
3524  MERGE(dct_error_sum[0][i]);
3525  MERGE(dct_error_sum[1][i]);
3526  }
3527  }
3528 
 /* Both bitstreams must already be byte-aligned; the slice bitstream is
  * then copied verbatim onto the end of the main one. */
3529  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3530  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3531  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3532  flush_put_bits(&dst->pb);
3533 }
3534 
/**
 * Pick the quality (lambda/qscale) for the current picture.
 *
 * Priority: an explicitly queued s->next_lambda wins; otherwise, unless the
 * qscale is fixed, the rate controller is asked for a quality.  With
 * adaptive quantization a per-MB qscale table is initialized and s->lambda
 * is taken from lambda_table[0]; otherwise s->lambda comes straight from
 * the picture quality.  Finishes by deriving the actual qscale via
 * update_qscale().
 *
 * @param dry_run if nonzero, one-shot state is not consumed
 *                (s->next_lambda is left queued)
 * @return 0 on success, -1 if rate control produced a negative quality
 */
3535 static int estimate_qp(MpegEncContext *s, int dry_run){
3536  if (s->next_lambda){
3537  s->cur_pic.ptr->f->quality = s->next_lambda;
 // consume the queued lambda only on the real (non-dry) run
3538  if(!dry_run) s->next_lambda= 0;
3539  } else if (!s->fixed_qscale) {
3540  int quality = ff_rate_estimate_qscale(s, dry_run);
3541  s->cur_pic.ptr->f->quality = quality;
3542  if (s->cur_pic.ptr->f->quality < 0)
3543  return -1;
3544  }
3545 
3546  if(s->adaptive_quant){
3547  init_qscale_tab(s);
3548 
 /* NOTE(review): the codec-specific calls that belong inside the two
  * CONFIG_* branches below are missing from this extract (presumably the
  * per-codec qscale clean-up helpers); confirm against the upstream
  * file before relying on the control flow as shown. */
3549  switch(s->codec_id){
3550  case AV_CODEC_ID_MPEG4:
3551  if (CONFIG_MPEG4_ENCODER)
3553  break;
3554  case AV_CODEC_ID_H263:
3555  case AV_CODEC_ID_H263P:
3556  case AV_CODEC_ID_FLV1:
3557  if (CONFIG_H263_ENCODER)
3559  break;
3560  }
3561 
3562  s->lambda= s->lambda_table[0];
3563  //FIXME broken
3564  }else
3565  s->lambda = s->cur_pic.ptr->f->quality;
3566  update_qscale(s);
3567  return 0;
3568 }
3569 
3570 /* must be called before writing the header */
/* NOTE(review): the function signature line was lost in this extract.
 * Body: derive the temporal distances used for B-frame prediction from
 * the current pts (scaled by time_base.num):
 *   pp_time = distance between the two most recent non-B pictures
 *   pb_time = distance from the last non-B picture to this B picture
 */
3572  av_assert1(s->cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3573  s->time = s->cur_pic.ptr->f->pts * s->avctx->time_base.num;
3574 
3575  if(s->pict_type==AV_PICTURE_TYPE_B){
3576  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
 // a B-frame must lie strictly between its surrounding reference frames
3577  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3578  }else{
3579  s->pp_time= s->time - s->last_non_b_time;
3580  s->last_non_b_time= s->time;
3581  av_assert1(s->picture_number==0 || s->pp_time > 0);
3582  }
3583 }
3584 
3586 {
3587  int i, ret;
3588  int bits;
3589  int context_count = s->slice_context_count;
3590 
3591  /* Reset the average MB variance */
3592  s->me.mb_var_sum_temp =
3593  s->me.mc_mb_var_sum_temp = 0;
3594 
3595  /* we need to initialize some time vars before we can encode B-frames */
3596  // RAL: Condition added for MPEG1VIDEO
3597  if (s->out_format == FMT_MPEG1 || (s->h263_pred && s->msmpeg4_version == MSMP4_UNUSED))
3599  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3601 
3602  s->me.scene_change_score=0;
3603 
3604 // s->lambda= s->cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3605 
3606  if(s->pict_type==AV_PICTURE_TYPE_I){
3607  s->no_rounding = s->msmpeg4_version >= MSMP4_V3;
3608  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3609  s->no_rounding ^= s->flipflop_rounding;
3610  }
3611 
3612  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3613  if (estimate_qp(s,1) < 0)
3614  return -1;
3616  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3617  if(s->pict_type==AV_PICTURE_TYPE_B)
3618  s->lambda= s->last_lambda_for[s->pict_type];
3619  else
3620  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3621  update_qscale(s);
3622  }
3623 
3624  if (s->out_format != FMT_MJPEG) {
3625  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3626  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3627  s->q_chroma_intra_matrix = s->q_intra_matrix;
3628  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3629  }
3630 
3631  ff_me_init_pic(s);
3632 
3633  s->mb_intra=0; //for the rate distortion & bit compare functions
3634  for (int i = 0; i < context_count; i++) {
3635  MpegEncContext *const slice = s->thread_context[i];
3636  uint8_t *start, *end;
3637  int h;
3638 
3639  if (i) {
3640  ret = ff_update_duplicate_context(slice, s);
3641  if (ret < 0)
3642  return ret;
3643  }
3644  slice->me.temp = slice->me.scratchpad = slice->sc.scratchpad_buf;
3645 
3646  h = s->mb_height;
3647  start = pkt->data + (size_t)(((int64_t) pkt->size) * slice->start_mb_y / h);
3648  end = pkt->data + (size_t)(((int64_t) pkt->size) * slice-> end_mb_y / h);
3649 
3650  init_put_bits(&s->thread_context[i]->pb, start, end - start);
3651  }
3652 
3653  /* Estimate motion for every MB */
3654  if(s->pict_type != AV_PICTURE_TYPE_I){
3655  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3656  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3657  if (s->pict_type != AV_PICTURE_TYPE_B) {
3658  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3659  s->me_pre == 2) {
3660  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3661  }
3662  }
3663 
3664  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3665  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3666  /* I-Frame */
3667  for(i=0; i<s->mb_stride*s->mb_height; i++)
3668  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3669 
3670  if(!s->fixed_qscale){
3671  /* finding spatial complexity for I-frame rate control */
3672  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3673  }
3674  }
3675  for(i=1; i<context_count; i++){
3676  merge_context_after_me(s, s->thread_context[i]);
3677  }
3678  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3679  s->mb_var_sum = s->me. mb_var_sum_temp;
3680  emms_c();
3681 
3682  if (s->me.scene_change_score > s->scenechange_threshold &&
3683  s->pict_type == AV_PICTURE_TYPE_P) {
3684  s->pict_type= AV_PICTURE_TYPE_I;
3685  for(i=0; i<s->mb_stride*s->mb_height; i++)
3686  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3687  if (s->msmpeg4_version >= MSMP4_V3)
3688  s->no_rounding=1;
3689  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3690  s->mb_var_sum, s->mc_mb_var_sum);
3691  }
3692 
3693  if(!s->umvplus){
3694  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3695  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3696 
3697  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3698  int a,b;
3699  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3700  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3701  s->f_code= FFMAX3(s->f_code, a, b);
3702  }
3703 
3705  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3706  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3707  int j;
3708  for(i=0; i<2; i++){
3709  for(j=0; j<2; j++)
3710  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3711  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3712  }
3713  }
3714  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3715  int a, b;
3716 
3717  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3718  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3719  s->f_code = FFMAX(a, b);
3720 
3721  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3722  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3723  s->b_code = FFMAX(a, b);
3724 
3725  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3726  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3727  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3728  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3729  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3730  int dir, j;
3731  for(dir=0; dir<2; dir++){
3732  for(i=0; i<2; i++){
3733  for(j=0; j<2; j++){
3736  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3737  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3738  }
3739  }
3740  }
3741  }
3742  }
3743  }
3744 
3745  if (estimate_qp(s, 0) < 0)
3746  return -1;
3747 
3748  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3749  s->pict_type == AV_PICTURE_TYPE_I &&
3750  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3751  s->qscale= 3; //reduce clipping problems
3752 
3753  if (s->out_format == FMT_MJPEG) {
3754  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3755  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3756 
3757  if (s->avctx->intra_matrix) {
3758  chroma_matrix =
3759  luma_matrix = s->avctx->intra_matrix;
3760  }
3761  if (s->avctx->chroma_intra_matrix)
3762  chroma_matrix = s->avctx->chroma_intra_matrix;
3763 
3764  /* for mjpeg, we do include qscale in the matrix */
3765  for(i=1;i<64;i++){
3766  int j = s->idsp.idct_permutation[i];
3767 
3768  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3769  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3770  }
3771  s->y_dc_scale_table=
3772  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3773  s->chroma_intra_matrix[0] =
3774  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3775  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3776  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3777  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3778  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3779  s->qscale= 8;
3780 
3781  if (s->codec_id == AV_CODEC_ID_AMV) {
3782  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3783  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3784  for (int i = 1; i < 64; i++) {
3785  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3786 
3787  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3788  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3789  }
3790  s->y_dc_scale_table = y;
3791  s->c_dc_scale_table = c;
3792  s->intra_matrix[0] = 13;
3793  s->chroma_intra_matrix[0] = 14;
3794  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3795  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3796  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3797  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3798  s->qscale = 8;
3799  }
3800  }
3801 
3802  if (s->pict_type == AV_PICTURE_TYPE_I) {
3803  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3804  } else {
3805  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3806  }
3807  s->cur_pic.ptr->f->pict_type = s->pict_type;
3808 
3809  if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3810  s->picture_in_gop_number=0;
3811 
3812  s->mb_x = s->mb_y = 0;
3813  s->last_bits= put_bits_count(&s->pb);
3814  switch(s->out_format) {
3815 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3816  case FMT_MJPEG:
3818  break;
3819 #endif
3820  case FMT_SPEEDHQ:
3821  if (CONFIG_SPEEDHQ_ENCODER)
3823  break;
3824  case FMT_H261:
3825  if (CONFIG_H261_ENCODER)
3827  break;
3828  case FMT_H263:
3829  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3831 #if CONFIG_MSMPEG4ENC
3832  else if (s->msmpeg4_version != MSMP4_UNUSED)
3834 #endif
3835  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3837  if (ret < 0)
3838  return ret;
3839  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3841  if (ret < 0)
3842  return ret;
3843  }
3844  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3846  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3848  else if (CONFIG_H263_ENCODER)
3850  break;
3851  case FMT_MPEG1:
3852  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3854  break;
3855  default:
3856  av_assert0(0);
3857  }
3858  bits= put_bits_count(&s->pb);
3859  s->header_bits= bits - s->last_bits;
3860 
3861  for(i=1; i<context_count; i++){
3862  update_duplicate_context_after_me(s->thread_context[i], s);
3863  }
3864  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3865  for(i=1; i<context_count; i++){
3866  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3867  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3868  merge_context_after_encode(s, s->thread_context[i]);
3869  }
3870  emms_c();
3871  return 0;
3872 }
3873 
3874 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3875  const int intra= s->mb_intra;
3876  int i;
3877 
3878  s->dct_count[intra]++;
3879 
3880  for(i=0; i<64; i++){
3881  int level= block[i];
3882 
3883  if(level){
3884  if(level>0){
3885  s->dct_error_sum[intra][i] += level;
3886  level -= s->dct_offset[intra][i];
3887  if(level<0) level=0;
3888  }else{
3889  s->dct_error_sum[intra][i] -= level;
3890  level += s->dct_offset[intra][i];
3891  if(level>0) level=0;
3892  }
3893  block[i]= level;
3894  }
3895  }
3896 }
3897 
3899  int16_t *block, int n,
3900  int qscale, int *overflow){
3901  const int *qmat;
3902  const uint16_t *matrix;
3903  const uint8_t *scantable;
3904  const uint8_t *perm_scantable;
3905  int max=0;
3906  unsigned int threshold1, threshold2;
3907  int bias=0;
3908  int run_tab[65];
3909  int level_tab[65];
3910  int score_tab[65];
3911  int survivor[65];
3912  int survivor_count;
3913  int last_run=0;
3914  int last_level=0;
3915  int last_score= 0;
3916  int last_i;
3917  int coeff[2][64];
3918  int coeff_count[64];
3919  int qmul, qadd, start_i, last_non_zero, i, dc;
3920  const int esc_length= s->ac_esc_length;
3921  const uint8_t *length, *last_length;
3922  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3923  int mpeg2_qscale;
3924 
3925  s->fdsp.fdct(block);
3926 
3927  if(s->dct_error_sum)
3928  s->denoise_dct(s, block);
3929  qmul= qscale*16;
3930  qadd= ((qscale-1)|1)*8;
3931 
3932  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3933  else mpeg2_qscale = qscale << 1;
3934 
3935  if (s->mb_intra) {
3936  int q;
3937  scantable= s->intra_scantable.scantable;
3938  perm_scantable= s->intra_scantable.permutated;
3939  if (!s->h263_aic) {
3940  if (n < 4)
3941  q = s->y_dc_scale;
3942  else
3943  q = s->c_dc_scale;
3944  q = q << 3;
3945  } else{
3946  /* For AIC we skip quant/dequant of INTRADC */
3947  q = 1 << 3;
3948  qadd=0;
3949  }
3950 
3951  /* note: block[0] is assumed to be positive */
3952  block[0] = (block[0] + (q >> 1)) / q;
3953  start_i = 1;
3954  last_non_zero = 0;
3955  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3956  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3957  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3958  bias= 1<<(QMAT_SHIFT-1);
3959 
3960  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3961  length = s->intra_chroma_ac_vlc_length;
3962  last_length= s->intra_chroma_ac_vlc_last_length;
3963  } else {
3964  length = s->intra_ac_vlc_length;
3965  last_length= s->intra_ac_vlc_last_length;
3966  }
3967  } else {
3968  scantable= s->inter_scantable.scantable;
3969  perm_scantable= s->inter_scantable.permutated;
3970  start_i = 0;
3971  last_non_zero = -1;
3972  qmat = s->q_inter_matrix[qscale];
3973  matrix = s->inter_matrix;
3974  length = s->inter_ac_vlc_length;
3975  last_length= s->inter_ac_vlc_last_length;
3976  }
3977  last_i= start_i;
3978 
3979  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3980  threshold2= (threshold1<<1);
3981 
3982  for(i=63; i>=start_i; i--) {
3983  const int j = scantable[i];
3984  int level = block[j] * qmat[j];
3985 
3986  if(((unsigned)(level+threshold1))>threshold2){
3987  last_non_zero = i;
3988  break;
3989  }
3990  }
3991 
3992  for(i=start_i; i<=last_non_zero; i++) {
3993  const int j = scantable[i];
3994  int level = block[j] * qmat[j];
3995 
3996 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3997 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3998  if(((unsigned)(level+threshold1))>threshold2){
3999  if(level>0){
4000  level= (bias + level)>>QMAT_SHIFT;
4001  coeff[0][i]= level;
4002  coeff[1][i]= level-1;
4003 // coeff[2][k]= level-2;
4004  }else{
4005  level= (bias - level)>>QMAT_SHIFT;
4006  coeff[0][i]= -level;
4007  coeff[1][i]= -level+1;
4008 // coeff[2][k]= -level+2;
4009  }
4010  coeff_count[i]= FFMIN(level, 2);
4011  av_assert2(coeff_count[i]);
4012  max |=level;
4013  }else{
4014  coeff[0][i]= (level>>31)|1;
4015  coeff_count[i]= 1;
4016  }
4017  }
4018 
4019  *overflow= s->max_qcoeff < max; //overflow might have happened
4020 
4021  if(last_non_zero < start_i){
4022  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4023  return last_non_zero;
4024  }
4025 
4026  score_tab[start_i]= 0;
4027  survivor[0]= start_i;
4028  survivor_count= 1;
4029 
4030  for(i=start_i; i<=last_non_zero; i++){
4031  int level_index, j, zero_distortion;
4032  int dct_coeff= FFABS(block[ scantable[i] ]);
4033  int best_score=256*256*256*120;
4034 
4035  if (s->fdsp.fdct == ff_fdct_ifast)
4036  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4037  zero_distortion= dct_coeff*dct_coeff;
4038 
4039  for(level_index=0; level_index < coeff_count[i]; level_index++){
4040  int distortion;
4041  int level= coeff[level_index][i];
4042  const int alevel= FFABS(level);
4043  int unquant_coeff;
4044 
4045  av_assert2(level);
4046 
4047  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4048  unquant_coeff= alevel*qmul + qadd;
4049  } else if(s->out_format == FMT_MJPEG) {
4050  j = s->idsp.idct_permutation[scantable[i]];
4051  unquant_coeff = alevel * matrix[j] * 8;
4052  }else{ // MPEG-1
4053  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4054  if(s->mb_intra){
4055  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4056  unquant_coeff = (unquant_coeff - 1) | 1;
4057  }else{
4058  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4059  unquant_coeff = (unquant_coeff - 1) | 1;
4060  }
4061  unquant_coeff<<= 3;
4062  }
4063 
4064  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4065  level+=64;
4066  if((level&(~127)) == 0){
4067  for(j=survivor_count-1; j>=0; j--){
4068  int run= i - survivor[j];
4069  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4070  score += score_tab[i-run];
4071 
4072  if(score < best_score){
4073  best_score= score;
4074  run_tab[i+1]= run;
4075  level_tab[i+1]= level-64;
4076  }
4077  }
4078 
4079  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4080  for(j=survivor_count-1; j>=0; j--){
4081  int run= i - survivor[j];
4082  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4083  score += score_tab[i-run];
4084  if(score < last_score){
4085  last_score= score;
4086  last_run= run;
4087  last_level= level-64;
4088  last_i= i+1;
4089  }
4090  }
4091  }
4092  }else{
4093  distortion += esc_length*lambda;
4094  for(j=survivor_count-1; j>=0; j--){
4095  int run= i - survivor[j];
4096  int score= distortion + score_tab[i-run];
4097 
4098  if(score < best_score){
4099  best_score= score;
4100  run_tab[i+1]= run;
4101  level_tab[i+1]= level-64;
4102  }
4103  }
4104 
4105  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4106  for(j=survivor_count-1; j>=0; j--){
4107  int run= i - survivor[j];
4108  int score= distortion + score_tab[i-run];
4109  if(score < last_score){
4110  last_score= score;
4111  last_run= run;
4112  last_level= level-64;
4113  last_i= i+1;
4114  }
4115  }
4116  }
4117  }
4118  }
4119 
4120  score_tab[i+1]= best_score;
4121 
4122  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4123  if(last_non_zero <= 27){
4124  for(; survivor_count; survivor_count--){
4125  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4126  break;
4127  }
4128  }else{
4129  for(; survivor_count; survivor_count--){
4130  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4131  break;
4132  }
4133  }
4134 
4135  survivor[ survivor_count++ ]= i+1;
4136  }
4137 
4138  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4139  last_score= 256*256*256*120;
4140  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4141  int score= score_tab[i];
4142  if (i)
4143  score += lambda * 2; // FIXME more exact?
4144 
4145  if(score < last_score){
4146  last_score= score;
4147  last_i= i;
4148  last_level= level_tab[i];
4149  last_run= run_tab[i];
4150  }
4151  }
4152  }
4153 
4154  s->coded_score[n] = last_score;
4155 
4156  dc= FFABS(block[0]);
4157  last_non_zero= last_i - 1;
4158  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4159 
4160  if(last_non_zero < start_i)
4161  return last_non_zero;
4162 
4163  if(last_non_zero == 0 && start_i == 0){
4164  int best_level= 0;
4165  int best_score= dc * dc;
4166 
4167  for(i=0; i<coeff_count[0]; i++){
4168  int level= coeff[i][0];
4169  int alevel= FFABS(level);
4170  int unquant_coeff, score, distortion;
4171 
4172  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4173  unquant_coeff= (alevel*qmul + qadd)>>3;
4174  } else{ // MPEG-1
4175  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4176  unquant_coeff = (unquant_coeff - 1) | 1;
4177  }
4178  unquant_coeff = (unquant_coeff + 4) >> 3;
4179  unquant_coeff<<= 3 + 3;
4180 
4181  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4182  level+=64;
4183  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4184  else score= distortion + esc_length*lambda;
4185 
4186  if(score < best_score){
4187  best_score= score;
4188  best_level= level - 64;
4189  }
4190  }
4191  block[0]= best_level;
4192  s->coded_score[n] = best_score - dc*dc;
4193  if(best_level == 0) return -1;
4194  else return last_non_zero;
4195  }
4196 
4197  i= last_i;
4198  av_assert2(last_level);
4199 
4200  block[ perm_scantable[last_non_zero] ]= last_level;
4201  i -= last_run + 1;
4202 
4203  for(; i>start_i; i -= run_tab[i] + 1){
4204  block[ perm_scantable[i-1] ]= level_tab[i];
4205  }
4206 
4207  return last_non_zero;
4208 }
4209 
4210 static int16_t basis[64][64];
4211 
4212 static void build_basis(uint8_t *perm){
4213  int i, j, x, y;
4214  emms_c();
4215  for(i=0; i<8; i++){
4216  for(j=0; j<8; j++){
4217  for(y=0; y<8; y++){
4218  for(x=0; x<8; x++){
4219  double s= 0.25*(1<<BASIS_SHIFT);
4220  int index= 8*i + j;
4221  int perm_index= perm[index];
4222  if(i==0) s*= sqrt(0.5);
4223  if(j==0) s*= sqrt(0.5);
4224  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4225  }
4226  }
4227  }
4228  }
4229 }
4230 
/**
 * Greedily refine the quantized coefficients of one 8x8 block for
 * quantizer noise shaping: repeatedly try changing each coefficient
 * (and, for intra blocks, the DC) by +-1, score each candidate as
 * VLC-bit-cost-delta * lambda plus the weighted reconstruction error
 * change reported by try_8x8basis(), and apply the single best change
 * per pass until no change lowers the score.
 *
 * @param s      encoder context (scantables, VLC length tables, DSP fns)
 * @param block  quantized coefficients in IDCT-permuted order; modified in place
 * @param weight per-coefficient error weights; NOTE: rescaled in place to 16..63
 * @param orig   reference samples the reconstruction is compared against
 *               (presumably the pre-quantization block — confirm with caller)
 * @param n      block index; n > 3 selects chroma DC scale / VLC tables
 * @param qscale quantizer scale for this macroblock
 * @return index of the new last non-zero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                  // residual reconstruction error, scaled by 1<<RECON_SHIFT
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                  // zero-run before each non-zero coefficient, in scan order
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;            // VLC bit lengths for non-last (run,level) pairs
    const uint8_t *last_length;       // VLC bit lengths for the last (run,level) pair
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    // the shared DCT basis table is built on first use
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                  // intra: DC is handled separately, AC starts at 1
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    // seed the error with the dequantized DC (plus rounding) minus the reference
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    // remap the caller's weights into the 16..63 range and derive lambda
    // from their energy so bit cost and distortion share one scale
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    // add every currently-coded coefficient's contribution into rem[] and
    // record the zero-run preceding each one
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    // main refinement loop: each iteration applies the single best +-1 change
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            // forward-DCT of the weighted error; its sign tells which
            // direction a currently-zero coefficient should move
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            // intra DC: try +-1 on block[0] (its bit cost is not modeled here)
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            // below noise-shaping level 3, only positions up to just past
            // the last coded coefficient are considered
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;                    // run2 = zeros left until the next coded coefficient
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                // below level 2, never grow a coefficient's magnitude
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        // level change only: bit-cost delta for the same run
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        // a zero becomes +-1: this splits an existing run
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            // skip candidates that move against the error gradient
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;   // out of VLC table range, cost not modeled

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            // new coefficient past the end: it becomes the new "last",
                            // and the previous last is recoded as a non-last pair
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    // a +-1 coefficient becomes zero: two runs merge into one
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=  last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        // removing the last coefficient: the previous one becomes "last"
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            // commit the winning change, then refresh last_non_zero,
            // run_tab[] and the residual error
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;   // no candidate improved the score — converged
        }
    }

    return last_non_zero;
}
4546 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the coded coefficients and clear their old positions first,
     * so the scatter below cannot read an already-overwritten slot. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[idx]    = block[pos];
        block[pos]    = 0;
    }

    /* Scatter each coefficient to its permuted position. */
    for (int idx = 0; idx <= last; idx++)
        block[permutation[scantable[idx]]] = saved[idx];
}
4582 
                              int16_t *block, int n,
                              int qscale, int *overflow)
{
    // Plain (non-trellis) quantizer: forward-DCT the block, then quantize
    // every coefficient with the precomputed qmat/bias for this qscale.
    // Returns the index of the last non-zero coefficient in scan order;
    // *overflow is set if any quantized level exceeds s->max_qcoeff.
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;                         // OR of all quantized magnitudes, for the overflow check
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);      // DCT-domain noise reduction, when enabled

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;   // intra DC: simple rounded division
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    // a coefficient survives iff |level*qmat| is large enough to quantize
    // to a non-zero value; the unsigned compare tests both signs at once
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    // scan backwards to find the last significant coefficient, zeroing
    // the insignificant tail along the way
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    // quantize the remaining coefficients in scan order
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;   // quantize the magnitude, restore the sign
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1359
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:345
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:687
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:155
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:235
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:190
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:429
encode_picture
static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3585
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegvideoenc.h)
Definition: mpegvideo.h:290
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:48
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:105
level
uint8_t level
Definition: svq3.c:205
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:388
av_clip
#define av_clip
Definition: common.h:100
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3571
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:158
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:541
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:201
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
r
const char * r
Definition: vf_curves.c:127
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:346
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:819
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:222
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:229
mem_internal.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:308
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:483
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:292
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1309
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1667
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:143
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:97
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
matrix
Definition: vc1dsp.c:43
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:63
src1
const pixel * src1
Definition: h264pred_template.c:421
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2674
mpv_reconstruct_mb_template.c
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4210
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:980
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2718
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:840
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1757
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:263
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:389
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:41
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:331
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:459
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:501
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:38
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:48
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:234
AVPacket::data
uint8_t * data
Definition: packet.h:539
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:376
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
data
const char data[16]
Definition: mxf.c:149
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:207
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
prepare_picture
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1146
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:219
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:294
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:796
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:88
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:820
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:385
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:557
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:58
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:497
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2699
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2112
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:148
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:968
wmv2enc.h
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1273
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:28
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:607
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:594
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:894
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:275
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:52
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:262
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:127
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:36
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:910
MpegEncContext::encoding_error
uint64_t encoding_error[MPV_MAX_PLANES]
Definition: mpegvideo.h:255
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1793
skip_check
static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1318
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:84
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:66
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3535
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
FDCTDSPContext
Definition: fdctdsp.h:28
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:826
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3502
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
frame_start
static void frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1778
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:463
fail
#define fail()
Definition: checkasm.h:189
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:138
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:67
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:43
perm
perm
Definition: f_perms.c:75
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1241
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:77
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:61
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:449
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:45
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2829
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:50
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:523
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:895
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:343
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:774
RateControlContext
rate control context.
Definition: ratecontrol.h:60
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2806
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:36
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:97
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_enc.c:1088
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4212
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:729
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1105
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:460
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:260
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:863
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:288
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1502
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1585
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:224
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
init_qscale_tab
static void init_qscale_tab(MpegEncContext *s)
init s->cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:240
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1302
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4558
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1537
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:271
PutBitContext
Definition: put_bits.h:50
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:46
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:861
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2743
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
me_cmp_init
static av_cold int me_cmp_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:309
ff_mpv_alloc_pic_pool
av_cold FFRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1287
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:392
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:51
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:486
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpv_reconstruct_mb_template.c:56
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:713
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2786
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:329
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:228
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:307
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:47
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:253
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:853
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
ff_dct_encode_init
av_cold void ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:294
mathops.h
dct_quantize_c
static int dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4583
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:341
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3501
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:900
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:709
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1028
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:969
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1337
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpv_reconstruct_mb_template.c:24
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:281
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1314
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:93
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1050
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:273
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:266
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:847
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:279
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3874
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:411
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1327
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:550
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:847
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1412
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1344
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2136
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
f
f
Definition: af_crystalizer.c:122
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3508
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:289
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:55
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1037
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1178
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2022
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:299
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:609
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1076
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:128
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:202
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:55
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:330
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:280
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:40
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:99
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:60
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:467
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:281
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:425
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:264
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_me_init_pic
void ff_me_init_pic(MpegEncContext *s)
Definition: motion_est.c:370
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:493
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:545
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:286
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:36
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:53
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MpegEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:42
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:196
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:461
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:218
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:301
emms.h
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:78
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:252
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:277
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:465
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:459
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3898
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1119
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2863
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1380
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1003
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:429
src2
const pixel * src2
Definition: h264pred_template.c:422
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:391
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:276
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:113
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:830
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:637
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:197
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:911
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2537
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:424
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:247
AVCodecContext::height
int height
Definition: avcodec.h:624
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:508
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:663
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
me_cmp_func
int(* me_cmp_func)(struct MpegEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:292
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1389
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2602
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:53
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:871
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1716
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:476
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:205
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:462
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:868
AVCodecContext
main external API structure.
Definition: avcodec.h:451
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:96
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:901
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1353
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:377
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:347
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1266
encode_mb
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2527
AVRational::den
int den
Denominator.
Definition: rational.h:60
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:423
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1662
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:420
set_bframe_chain_length
static int set_bframe_chain_length(MpegEncContext *s)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1532
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:854
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1721
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:866
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:971
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
Definition: ituh263enc.c:109
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:37
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:280
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:801
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:112
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:351
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:545
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1316
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4231
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2564
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:261
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1053
ff_h263_update_mb
void ff_h263_update_mb(MpegEncContext *s)
Definition: ituh263enc.c:690
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:970
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AVPacket
This structure stores compressed data.
Definition: packet.h:516
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2078
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:989
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:51
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:624
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:434
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2653
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:346
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:460
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:609
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:884
h
h
Definition: vp9dsp_template.c:2070
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:909
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:422
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:59
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:220
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:170
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:648
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:344
src
#define src
Definition: vp8dsp.c:248
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pixblockdsp.h
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1614
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:951
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:458
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2766
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:700
intmath.h