mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpeg12.h"
39 #include "mpegvideo.h"
40 #include "h261.h"
41 #include "h263.h"
42 #include "mathops.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "faandct.h"
46 #include "thread.h"
47 #include "aandcttab.h"
48 #include "flv.h"
49 #include "mpeg4video.h"
50 #include "internal.h"
51 #include "bytestream.h"
52 #include <limits.h>
53 #include "sp5x.h"
54 
55 static int encode_picture(MpegEncContext *s, int picture_number);
56 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
57 static int sse_mb(MpegEncContext *s);
58 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
59 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
60 
61 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
62 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
63 
64 const AVOption ff_mpv_generic_options[] = {
65  FF_MPV_COMMON_OPTS
66  { NULL },
67 };
68 
69 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
70  uint16_t (*qmat16)[2][64],
71  const uint16_t *quant_matrix,
72  int bias, int qmin, int qmax, int intra)
73 {
74  int qscale;
75  int shift = 0;
76 
77  for (qscale = qmin; qscale <= qmax; qscale++) {
78  int i;
79  if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
80  dsp->fdct == ff_jpeg_fdct_islow_10 ||
81  dsp->fdct == ff_faandct) {
82  for (i = 0; i < 64; i++) {
83  const int j = dsp->idct_permutation[i];
84  /* 16 <= qscale * quant_matrix[i] <= 7905
85  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
86  * 19952 <= x <= 249205026
87  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
88  * 3444240 >= (1 << 36) / (x) >= 275 */
89 
90  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
91  (qscale * quant_matrix[j]));
92  }
93  } else if (dsp->fdct == ff_fdct_ifast) {
94  for (i = 0; i < 64; i++) {
95  const int j = dsp->idct_permutation[i];
96  /* 16 <= qscale * quant_matrix[i] <= 7905
97  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
98  * 19952 <= x <= 249205026
99  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
100  * 3444240 >= (1 << 36) / (x) >= 275 */
101 
102  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
103  (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
104  }
105  } else {
106  for (i = 0; i < 64; i++) {
107  const int j = dsp->idct_permutation[i];
108  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109  * Assume x = qscale * quant_matrix[i]
110  * So 16 <= x <= 7905
111  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112  * so 32768 >= (1 << 19) / (x) >= 67 */
113  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114  (qscale * quant_matrix[j]));
115  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116  // (qscale * quant_matrix[i]);
117  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118  (qscale * quant_matrix[j]);
119 
120  if (qmat16[qscale][0][i] == 0 ||
121  qmat16[qscale][0][i] == 128 * 256)
122  qmat16[qscale][0][i] = 128 * 256 - 1;
123  qmat16[qscale][1][i] =
124  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125  qmat16[qscale][0][i]);
126  }
127  }
128 
129  for (i = intra; i < 64; i++) {
130  int64_t max = 8191;
131  if (dsp->fdct == ff_fdct_ifast) {
132  max = (8191LL * ff_aanscales[i]) >> 14;
133  }
134  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
135  shift++;
136  }
137  }
138  }
139  if (shift) {
140  av_log(NULL, AV_LOG_INFO,
141  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
142  QMAT_SHIFT - shift);
143  }
144 }
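/* Note: ff_convert_matrix() turns a quantization matrix into reciprocal tables so
 * that the per-coefficient division level / (qscale * quant_matrix[i]) can later be
 * done as a multiply plus shift: roughly qmat[qscale][i] ~ 2^QMAT_SHIFT /
 * (qscale * quant_matrix[i]), with an extra ff_aanscales factor for the ifast FDCT
 * and a 16-bit variant in qmat16 for the MMX path. The final loop only checks that
 * max * qmat cannot overflow INT_MAX for the chosen shift. */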
145 
146 static inline void update_qscale(MpegEncContext *s)
147 {
148  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149  (FF_LAMBDA_SHIFT + 7);
150  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
151 
152  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
153  FF_LAMBDA_SHIFT;
154 }
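/* Note: update_qscale() maps the rate-distortion lambda back to a quantizer.
 * (lambda * 139) >> (FF_LAMBDA_SHIFT + 7) is roughly lambda / FF_QP2LAMBDA, so a
 * lambda near FF_QP2LAMBDA gives qscale ~ 1 before clipping to [qmin, qmax];
 * lambda2 caches lambda^2 rescaled by FF_LAMBDA_SCALE for later cost computations. */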
155 
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
157 {
158  int i;
159 
160  if (matrix) {
161  put_bits(pb, 1, 1);
162  for (i = 0; i < 64; i++) {
163  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
164  }
165  } else
166  put_bits(pb, 1, 0);
167 }
168 
169 /**
170  * init s->current_picture.qscale_table from s->lambda_table
171  */
172 void ff_init_qscale_tab(MpegEncContext *s)
173 {
174  int8_t * const qscale_table = s->current_picture.qscale_table;
175  int i;
176 
177  for (i = 0; i < s->mb_num; i++) {
178  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
181  s->avctx->qmax);
182  }
183 }
184 
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
186  MpegEncContext *src)
187 {
188 #define COPY(a) dst->a= src->a
189  COPY(pict_type);
190  COPY(current_picture);
191  COPY(f_code);
192  COPY(b_code);
193  COPY(qscale);
194  COPY(lambda);
195  COPY(lambda2);
196  COPY(picture_in_gop_number);
197  COPY(gop_picture_number);
198  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199  COPY(progressive_frame); // FIXME don't set in encode_header
200  COPY(partitioned_frame); // FIXME don't set in encode_header
201 #undef COPY
202 }
203 
204 /**
205  * Set the given MpegEncContext to defaults for encoding.
206  * The changed fields will not depend upon the prior state of the MpegEncContext.
207  */
208 static void MPV_encode_defaults(MpegEncContext *s)
209 {
210  int i;
211  ff_MPV_common_defaults(s);
212 
213  for (i = -16; i < 16; i++) {
214  default_fcode_tab[i + MAX_MV] = 1;
215  }
216  s->me.mv_penalty = default_mv_penalty;
217  s->fcode_tab = default_fcode_tab;
218 }
219 
220 av_cold int ff_dct_encode_init(MpegEncContext *s) {
221  if (ARCH_X86)
222  ff_dct_encode_init_x86(s);
223 
224  if (!s->dct_quantize)
225  s->dct_quantize = ff_dct_quantize_c;
226  if (!s->denoise_dct)
227  s->denoise_dct = denoise_dct_c;
228  s->fast_dct_quantize = s->dct_quantize;
229  if (s->avctx->trellis)
230  s->dct_quantize = dct_quantize_trellis_c;
231 
232  return 0;
233 }
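/* Note: the function pointers selected above choose the quantizer implementation:
 * the plain C dct_quantize / denoise_dct by default, an architecture-specific
 * version where available, and dct_quantize_trellis_c when avctx->trellis is set,
 * which trades speed for a rate-distortion optimized choice of coefficient levels. */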
234 
235 /* init video encoder */
236 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
237 {
238  MpegEncContext *s = avctx->priv_data;
239  int i, ret;
240  int chroma_h_shift, chroma_v_shift;
241 
242  MPV_encode_defaults(s);
243 
244  switch (avctx->codec_id) {
245  case AV_CODEC_ID_MPEG2VIDEO:
246  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
247  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
248  av_log(avctx, AV_LOG_ERROR,
249  "only YUV420 and YUV422 are supported\n");
250  return -1;
251  }
252  break;
253  case AV_CODEC_ID_LJPEG:
254  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
255  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
256  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
257  avctx->pix_fmt != AV_PIX_FMT_BGR0 &&
258  avctx->pix_fmt != AV_PIX_FMT_BGRA &&
259  avctx->pix_fmt != AV_PIX_FMT_BGR24 &&
260  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
261  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
262  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
263  avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
264  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
265  return -1;
266  }
267  break;
268  case AV_CODEC_ID_MJPEG:
269  case AV_CODEC_ID_AMV:
270  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
271  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
272  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
273  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
274  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
275  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
276  avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
277  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
278  return -1;
279  }
280  break;
281  default:
282  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
283  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
284  return -1;
285  }
286  }
287 
288  switch (avctx->pix_fmt) {
289  case AV_PIX_FMT_YUVJ444P:
290  case AV_PIX_FMT_YUV444P:
291  s->chroma_format = CHROMA_444;
292  break;
293  case AV_PIX_FMT_YUVJ422P:
294  case AV_PIX_FMT_YUV422P:
295  s->chroma_format = CHROMA_422;
296  break;
297  case AV_PIX_FMT_YUVJ420P:
298  case AV_PIX_FMT_YUV420P:
299  default:
300  s->chroma_format = CHROMA_420;
301  break;
302  }
303 
304  s->bit_rate = avctx->bit_rate;
305  s->width = avctx->width;
306  s->height = avctx->height;
307  if (avctx->gop_size > 600 &&
308  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
309  av_log(avctx, AV_LOG_WARNING,
310  "keyframe interval too large!, reducing it from %d to %d\n",
311  avctx->gop_size, 600);
312  avctx->gop_size = 600;
313  }
314  s->gop_size = avctx->gop_size;
315  s->avctx = avctx;
316  s->flags = avctx->flags;
317  s->flags2 = avctx->flags2;
318  s->max_b_frames = avctx->max_b_frames;
319  s->codec_id = avctx->codec->id;
321  s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
322  s->mpeg_quant = avctx->mpeg_quant;
323  s->rtp_mode = !!avctx->rtp_payload_size;
326 
327  if (s->gop_size <= 1) {
328  s->intra_only = 1;
329  s->gop_size = 12;
330  } else {
331  s->intra_only = 0;
332  }
333 
334  s->me_method = avctx->me_method;
335 
336  /* Fixed QSCALE */
337  s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
338 
339  s->adaptive_quant = (s->avctx->lumi_masking ||
340  s->avctx->dark_masking ||
341  s->avctx->temporal_cplx_masking ||
342  s->avctx->spatial_cplx_masking ||
343  s->avctx->p_masking ||
344  s->avctx->border_masking ||
345  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
346  !s->fixed_qscale;
347 
349 
350  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
351  switch(avctx->codec_id) {
352  case AV_CODEC_ID_MPEG1VIDEO:
353  case AV_CODEC_ID_MPEG2VIDEO:
354  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
355  break;
356  case AV_CODEC_ID_MPEG4:
357  case AV_CODEC_ID_MSMPEG4V1:
358  case AV_CODEC_ID_MSMPEG4V2:
359  case AV_CODEC_ID_MSMPEG4V3:
360  if (avctx->rc_max_rate >= 15000000) {
361  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
362  } else if(avctx->rc_max_rate >= 2000000) {
363  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
364  } else if(avctx->rc_max_rate >= 384000) {
365  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
366  } else
367  avctx->rc_buffer_size = 40;
368  avctx->rc_buffer_size *= 16384;
369  break;
370  }
371  if (avctx->rc_buffer_size) {
372  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
373  }
374  }
375 
376  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
377  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
378  if (avctx->rc_max_rate && !avctx->rc_buffer_size)
379  return -1;
380  }
381 
382  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
383  av_log(avctx, AV_LOG_INFO,
384  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
385  }
386 
387  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
388  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
389  return -1;
390  }
391 
392  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
393  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
394  return -1;
395  }
396 
397  if (avctx->rc_max_rate &&
398  avctx->rc_max_rate == avctx->bit_rate &&
399  avctx->rc_max_rate != avctx->rc_min_rate) {
400  av_log(avctx, AV_LOG_INFO,
401  "impossible bitrate constraints, this will fail\n");
402  }
403 
404  if (avctx->rc_buffer_size &&
405  avctx->bit_rate * (int64_t)avctx->time_base.num >
406  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
407  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
408  return -1;
409  }
410 
411  if (!s->fixed_qscale &&
412  avctx->bit_rate * av_q2d(avctx->time_base) >
413  avctx->bit_rate_tolerance) {
414  av_log(avctx, AV_LOG_ERROR,
415  "bitrate tolerance too small for bitrate\n");
416  return -1;
417  }
418 
419  if (s->avctx->rc_max_rate &&
420  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
421  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
422  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
423  90000LL * (avctx->rc_buffer_size - 1) >
424  s->avctx->rc_max_rate * 0xFFFFLL) {
425  av_log(avctx, AV_LOG_INFO,
426  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
427  "specified vbv buffer is too large for the given bitrate!\n");
428  }
429 
430  if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
431  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
432  s->codec_id != AV_CODEC_ID_FLV1) {
433  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
434  return -1;
435  }
436 
437  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
438  av_log(avctx, AV_LOG_ERROR,
439  "OBMC is only supported with simple mb decision\n");
440  return -1;
441  }
442 
443  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
444  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
445  return -1;
446  }
447 
448  if (s->max_b_frames &&
449  s->codec_id != AV_CODEC_ID_MPEG4 &&
450  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
451  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
452  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
453  return -1;
454  }
455 
456  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
457  s->codec_id == AV_CODEC_ID_H263 ||
458  s->codec_id == AV_CODEC_ID_H263P) &&
459  (avctx->sample_aspect_ratio.num > 255 ||
460  avctx->sample_aspect_ratio.den > 255)) {
461  av_log(avctx, AV_LOG_WARNING,
462  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
463  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
464  av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
465  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
466  }
467 
468  if ((s->codec_id == AV_CODEC_ID_H263 ||
469  s->codec_id == AV_CODEC_ID_H263P) &&
470  (avctx->width > 2048 ||
471  avctx->height > 1152 )) {
472  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
473  return -1;
474  }
475  if ((s->codec_id == AV_CODEC_ID_H263 ||
476  s->codec_id == AV_CODEC_ID_H263P) &&
477  ((avctx->width &3) ||
478  (avctx->height&3) )) {
479  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
480  return -1;
481  }
482 
483  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
484  (avctx->width > 4095 ||
485  avctx->height > 4095 )) {
486  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
487  return -1;
488  }
489 
490  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
491  (avctx->width > 16383 ||
492  avctx->height > 16383 )) {
493  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
494  return -1;
495  }
496 
497  if (s->codec_id == AV_CODEC_ID_RV10 &&
498  (avctx->width &15 ||
499  avctx->height&15 )) {
500  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
501  return AVERROR(EINVAL);
502  }
503 
504  if (s->codec_id == AV_CODEC_ID_RV20 &&
505  (avctx->width &3 ||
506  avctx->height&3 )) {
507  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
508  return AVERROR(EINVAL);
509  }
510 
511  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
512  s->codec_id == AV_CODEC_ID_WMV2) &&
513  avctx->width & 1) {
514  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
515  return -1;
516  }
517 
518  if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
519  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
520  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
521  return -1;
522  }
523 
524  // FIXME mpeg2 uses that too
525  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
526  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
527  av_log(avctx, AV_LOG_ERROR,
528  "mpeg2 style quantization not supported by codec\n");
529  return -1;
530  }
531 
532  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
533  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
534  return -1;
535  }
536 
537  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
538  s->avctx->mb_decision != FF_MB_DECISION_RD) {
539  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
540  return -1;
541  }
542 
543  if (s->avctx->scenechange_threshold < 1000000000 &&
544  (s->flags & CODEC_FLAG_CLOSED_GOP)) {
545  av_log(avctx, AV_LOG_ERROR,
546  "closed gop with scene change detection are not supported yet, "
547  "set threshold to 1000000000\n");
548  return -1;
549  }
550 
551  if (s->flags & CODEC_FLAG_LOW_DELAY) {
552  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
553  av_log(avctx, AV_LOG_ERROR,
554  "low delay forcing is only available for mpeg2\n");
555  return -1;
556  }
557  if (s->max_b_frames != 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "b frames cannot be used with low delay\n");
560  return -1;
561  }
562  }
563 
564  if (s->q_scale_type == 1) {
565  if (avctx->qmax > 12) {
566  av_log(avctx, AV_LOG_ERROR,
567  "non linear quant only supports qmax <= 12 currently\n");
568  return -1;
569  }
570  }
571 
572  if (s->avctx->thread_count > 1 &&
573  s->codec_id != AV_CODEC_ID_MPEG4 &&
574  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
575  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
576  s->codec_id != AV_CODEC_ID_MJPEG &&
577  (s->codec_id != AV_CODEC_ID_H263P)) {
578  av_log(avctx, AV_LOG_ERROR,
579  "multi threaded encoding not supported by codec\n");
580  return -1;
581  }
582 
583  if (s->avctx->thread_count < 1) {
584  av_log(avctx, AV_LOG_ERROR,
585  "automatic thread number detection not supported by codec, "
586  "patch welcome\n");
587  return -1;
588  }
589 
590  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
591  s->rtp_mode = 1;
592 
593  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
594  s->h263_slice_structured = 1;
595 
596  if (!avctx->time_base.den || !avctx->time_base.num) {
597  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
598  return -1;
599  }
600 
601  i = (INT_MAX / 2 + 128) >> 8;
602  if (avctx->mb_threshold >= i) {
603  av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
604  i - 1);
605  return -1;
606  }
607 
608  if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
609  av_log(avctx, AV_LOG_INFO,
610  "notice: b_frame_strategy only affects the first pass\n");
611  avctx->b_frame_strategy = 0;
612  }
613 
614  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
615  if (i > 1) {
616  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
617  avctx->time_base.den /= i;
618  avctx->time_base.num /= i;
619  //return -1;
620  }
621 
623  // (a + x * 3 / 8) / x
624  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
625  s->inter_quant_bias = 0;
626  } else {
627  s->intra_quant_bias = 0;
628  // (a - x / 4) / x
629  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
630  }
631 
632  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
633  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
634  return AVERROR(EINVAL);
635  }
636 
637  if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
638  s->intra_quant_bias = avctx->intra_quant_bias;
639  if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
640  s->inter_quant_bias = avctx->inter_quant_bias;
641 
642  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
643 
644  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
645 
646  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
647  s->avctx->time_base.den > (1 << 16) - 1) {
648  av_log(avctx, AV_LOG_ERROR,
649  "timebase %d/%d not supported by MPEG 4 standard, "
650  "the maximum admitted value for the timebase denominator "
651  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
652  (1 << 16) - 1);
653  return -1;
654  }
655  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
656 
657  switch (avctx->codec->id) {
658  case AV_CODEC_ID_MPEG1VIDEO:
659  s->out_format = FMT_MPEG1;
660  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
661  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
662  break;
663  case AV_CODEC_ID_MPEG2VIDEO:
664  s->out_format = FMT_MPEG1;
665  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
666  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
667  s->rtp_mode = 1;
668  break;
669  case AV_CODEC_ID_LJPEG:
670  case AV_CODEC_ID_MJPEG:
671  case AV_CODEC_ID_AMV:
672  s->out_format = FMT_MJPEG;
673  s->intra_only = 1; /* force intra only for jpeg */
674  if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
675  (avctx->pix_fmt == AV_PIX_FMT_BGR0
676  || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
677  || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
678  s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
679  s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
680  s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
681  } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
682  s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
683  s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
684  } else {
685  s->mjpeg_vsample[0] = 2;
686  s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
687  s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
688  s->mjpeg_hsample[0] = 2;
689  s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
690  s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
691  }
692  if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
693  ff_mjpeg_encode_init(s) < 0)
694  return -1;
695  avctx->delay = 0;
696  s->low_delay = 1;
697  break;
698  case AV_CODEC_ID_H261:
699  if (!CONFIG_H261_ENCODER)
700  return -1;
701  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
702  av_log(avctx, AV_LOG_ERROR,
703  "The specified picture size of %dx%d is not valid for the "
704  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
705  s->width, s->height);
706  return -1;
707  }
708  s->out_format = FMT_H261;
709  avctx->delay = 0;
710  s->low_delay = 1;
711  break;
712  case AV_CODEC_ID_H263:
713  if (!CONFIG_H263_ENCODER)
714  return -1;
715  if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
716  s->width, s->height) == 8) {
717  av_log(avctx, AV_LOG_ERROR,
718  "The specified picture size of %dx%d is not valid for "
719  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
720  "352x288, 704x576, and 1408x1152. "
721  "Try H.263+.\n", s->width, s->height);
722  return -1;
723  }
724  s->out_format = FMT_H263;
725  avctx->delay = 0;
726  s->low_delay = 1;
727  break;
728  case AV_CODEC_ID_H263P:
729  s->out_format = FMT_H263;
730  s->h263_plus = 1;
731  /* Fx */
732  s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
733  s->modified_quant = s->h263_aic;
734  s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
735  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
736 
737  /* /Fx */
738  /* These are just to be sure */
739  avctx->delay = 0;
740  s->low_delay = 1;
741  break;
742  case AV_CODEC_ID_FLV1:
743  s->out_format = FMT_H263;
744  s->h263_flv = 2; /* format = 1; 11-bit codes */
745  s->unrestricted_mv = 1;
746  s->rtp_mode = 0; /* don't allow GOB */
747  avctx->delay = 0;
748  s->low_delay = 1;
749  break;
750  case AV_CODEC_ID_RV10:
751  s->out_format = FMT_H263;
752  avctx->delay = 0;
753  s->low_delay = 1;
754  break;
755  case AV_CODEC_ID_RV20:
756  s->out_format = FMT_H263;
757  avctx->delay = 0;
758  s->low_delay = 1;
759  s->modified_quant = 1;
760  s->h263_aic = 1;
761  s->h263_plus = 1;
762  s->loop_filter = 1;
763  s->unrestricted_mv = 0;
764  break;
765  case AV_CODEC_ID_MPEG4:
766  s->out_format = FMT_H263;
767  s->h263_pred = 1;
768  s->unrestricted_mv = 1;
769  s->low_delay = s->max_b_frames ? 0 : 1;
770  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
771  break;
772  case AV_CODEC_ID_MSMPEG4V2:
773  s->out_format = FMT_H263;
774  s->h263_pred = 1;
775  s->unrestricted_mv = 1;
776  s->msmpeg4_version = 2;
777  avctx->delay = 0;
778  s->low_delay = 1;
779  break;
780  case AV_CODEC_ID_MSMPEG4V3:
781  s->out_format = FMT_H263;
782  s->h263_pred = 1;
783  s->unrestricted_mv = 1;
784  s->msmpeg4_version = 3;
785  s->flipflop_rounding = 1;
786  avctx->delay = 0;
787  s->low_delay = 1;
788  break;
789  case AV_CODEC_ID_WMV1:
790  s->out_format = FMT_H263;
791  s->h263_pred = 1;
792  s->unrestricted_mv = 1;
793  s->msmpeg4_version = 4;
794  s->flipflop_rounding = 1;
795  avctx->delay = 0;
796  s->low_delay = 1;
797  break;
798  case AV_CODEC_ID_WMV2:
799  s->out_format = FMT_H263;
800  s->h263_pred = 1;
801  s->unrestricted_mv = 1;
802  s->msmpeg4_version = 5;
803  s->flipflop_rounding = 1;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  default:
808  return -1;
809  }
810 
811  avctx->has_b_frames = !s->low_delay;
812 
813  s->encoding = 1;
814 
815  s->progressive_frame =
816  s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
817  CODEC_FLAG_INTERLACED_ME) ||
818  s->alternate_scan);
819 
820  /* init */
821  if (ff_MPV_common_init(s) < 0)
822  return -1;
823 
825 
826  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
827  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
828 
829  s->quant_precision = 5;
830 
831  ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
832  ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
833 
834  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
835  ff_h261_encode_init(s);
836  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
837  ff_h263_encode_init(s);
838  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
839  ff_msmpeg4_encode_init(s);
840  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
841  && s->out_format == FMT_MPEG1)
842  ff_mpeg1_encode_init(s);
843 
844  /* init q matrix */
845  for (i = 0; i < 64; i++) {
846  int j = s->dsp.idct_permutation[i];
847  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
848  s->mpeg_quant) {
849  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
850  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
851  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
852  s->intra_matrix[j] =
853  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
854  } else {
855  /* mpeg1/2 */
856  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
857  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
858  }
859  if (s->avctx->intra_matrix)
860  s->intra_matrix[j] = s->avctx->intra_matrix[i];
861  if (s->avctx->inter_matrix)
862  s->inter_matrix[j] = s->avctx->inter_matrix[i];
863  }
864 
865  /* precompute matrix */
866  /* for mjpeg, we do include qscale in the matrix */
867  if (s->out_format != FMT_MJPEG) {
868  ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
869  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
870  31, 1);
871  ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
872  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
873  31, 0);
874  }
875 
876  if (ff_rate_control_init(s) < 0)
877  return -1;
878 
879  if (avctx->b_frame_strategy == 2) {
880  for (i = 0; i < s->max_b_frames + 2; i++) {
881  s->tmp_frames[i] = av_frame_alloc();
882  if (!s->tmp_frames[i])
883  return AVERROR(ENOMEM);
884 
885  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
886  s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
887  s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
888 
889  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
890  if (ret < 0)
891  return ret;
892  }
893  }
894 
895  return 0;
896 }
897 
898 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
899 {
900  MpegEncContext *s = avctx->priv_data;
901  int i;
902 
903  ff_rate_control_uninit(s);
904 
905  ff_MPV_common_end(s);
906  if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
907  s->out_format == FMT_MJPEG)
908  ff_mjpeg_encode_close(s);
909 
910  av_freep(&avctx->extradata);
911 
912  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
913  av_frame_free(&s->tmp_frames[i]);
914 
915  return 0;
916 }
917 
918 static int get_sae(uint8_t *src, int ref, int stride)
919 {
920  int x,y;
921  int acc = 0;
922 
923  for (y = 0; y < 16; y++) {
924  for (x = 0; x < 16; x++) {
925  acc += FFABS(src[x + y * stride] - ref);
926  }
927  }
928 
929  return acc;
930 }
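/* Note: get_sae() is the sum of absolute differences between a 16x16 block and a
 * constant reference value, i.e. a measure of how far the block is from being flat. */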
931 
932 static int get_intra_count(MpegEncContext *s, uint8_t *src,
933  uint8_t *ref, int stride)
934 {
935  int x, y, w, h;
936  int acc = 0;
937 
938  w = s->width & ~15;
939  h = s->height & ~15;
940 
941  for (y = 0; y < h; y += 16) {
942  for (x = 0; x < w; x += 16) {
943  int offset = x + y * stride;
944  int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
945  16);
946  int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
947  int sae = get_sae(src + offset, mean, stride);
948 
949  acc += sae + 500 < sad;
950  }
951  }
952  return acc;
953 }
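/* Note: get_intra_count() counts 16x16 blocks whose deviation from their own mean
 * (get_sae() plus a margin of 500) is still below the SAD against the reference
 * frame, i.e. blocks that look cheaper to code as intra than as inter; the result
 * drives the b_frame_score heuristic of b_frame_strategy 1. */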
954 
955 
956 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
957 {
958  Picture *pic = NULL;
959  int64_t pts;
960  int i, display_picture_number = 0, ret;
961  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
962  (s->low_delay ? 0 : 1);
963  int direct = 1;
964 
965  if (pic_arg) {
966  pts = pic_arg->pts;
967  display_picture_number = s->input_picture_number++;
968 
969  if (pts != AV_NOPTS_VALUE) {
970  if (s->user_specified_pts != AV_NOPTS_VALUE) {
971  int64_t last = s->user_specified_pts;
972 
973  if (pts <= last) {
974  av_log(s->avctx, AV_LOG_ERROR,
975  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
976  pts, last);
977  return AVERROR(EINVAL);
978  }
979 
980  if (!s->low_delay && display_picture_number == 1)
981  s->dts_delta = pts - last;
982  }
983  s->user_specified_pts = pts;
984  } else {
985  if (s->user_specified_pts != AV_NOPTS_VALUE) {
986  s->user_specified_pts =
987  pts = s->user_specified_pts + 1;
988  av_log(s->avctx, AV_LOG_INFO,
989  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
990  pts);
991  } else {
992  pts = display_picture_number;
993  }
994  }
995  }
996 
997  if (pic_arg) {
998  if (!pic_arg->buf[0])
999  direct = 0;
1000  if (pic_arg->linesize[0] != s->linesize)
1001  direct = 0;
1002  if (pic_arg->linesize[1] != s->uvlinesize)
1003  direct = 0;
1004  if (pic_arg->linesize[2] != s->uvlinesize)
1005  direct = 0;
1006 
1007  av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
1008  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1009 
1010  if (direct) {
1011  i = ff_find_unused_picture(s, 1);
1012  if (i < 0)
1013  return i;
1014 
1015  pic = &s->picture[i];
1016  pic->reference = 3;
1017 
1018  if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
1019  return ret;
1020  if (ff_alloc_picture(s, pic, 1) < 0) {
1021  return -1;
1022  }
1023  } else {
1024  i = ff_find_unused_picture(s, 0);
1025  if (i < 0)
1026  return i;
1027 
1028  pic = &s->picture[i];
1029  pic->reference = 3;
1030 
1031  if (ff_alloc_picture(s, pic, 0) < 0) {
1032  return -1;
1033  }
1034 
1035  if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1036  pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1037  pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1038  // empty
1039  } else {
1040  int h_chroma_shift, v_chroma_shift;
1041  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1042  &h_chroma_shift,
1043  &v_chroma_shift);
1044 
1045  for (i = 0; i < 3; i++) {
1046  int src_stride = pic_arg->linesize[i];
1047  int dst_stride = i ? s->uvlinesize : s->linesize;
1048  int h_shift = i ? h_chroma_shift : 0;
1049  int v_shift = i ? v_chroma_shift : 0;
1050  int w = s->width >> h_shift;
1051  int h = s->height >> v_shift;
1052  uint8_t *src = pic_arg->data[i];
1053  uint8_t *dst = pic->f.data[i];
1054 
1055  if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1056  h = ((s->height + 15)/16*16) >> v_shift;
1057  }
1058 
1059  if (!s->avctx->rc_buffer_size)
1060  dst += INPLACE_OFFSET;
1061 
1062  if (src_stride == dst_stride)
1063  memcpy(dst, src, src_stride * h);
1064  else {
1065  int h2 = h;
1066  uint8_t *dst2 = dst;
1067  while (h2--) {
1068  memcpy(dst2, src, w);
1069  dst2 += dst_stride;
1070  src += src_stride;
1071  }
1072  }
1073  if ((s->width & 15) || (s->height & 15)) {
1074  s->dsp.draw_edges(dst, dst_stride,
1075  w, h,
1076  16>>h_shift,
1077  16>>v_shift,
1078  EDGE_BOTTOM);
1079  }
1080  }
1081  }
1082  }
1083  ret = av_frame_copy_props(&pic->f, pic_arg);
1084  if (ret < 0)
1085  return ret;
1086 
1087  pic->f.display_picture_number = display_picture_number;
1088  pic->f.pts = pts; // we set this here to avoid modifying pic_arg
1089  }
1090 
1091  /* shift buffer entries */
1092  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1093  s->input_picture[i - 1] = s->input_picture[i];
1094 
1095  s->input_picture[encoding_delay] = (Picture*) pic;
1096 
1097  return 0;
1098 }
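/* Note: load_input_picture() either takes a reference to the caller's frame (when
 * its buffers and strides match the encoder layout) or copies it into an internal
 * Picture, padding the borders with draw_edges() when the size is not a multiple
 * of 16, and then queues it at position encoding_delay in input_picture[]. */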
1099 
1100 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1101 {
1102  int x, y, plane;
1103  int score = 0;
1104  int64_t score64 = 0;
1105 
1106  for (plane = 0; plane < 3; plane++) {
1107  const int stride = p->f.linesize[plane];
1108  const int bw = plane ? 1 : 2;
1109  for (y = 0; y < s->mb_height * bw; y++) {
1110  for (x = 0; x < s->mb_width * bw; x++) {
1111  int off = p->shared ? 0 : 16;
1112  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1113  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1114  int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1115 
1116  switch (s->avctx->frame_skip_exp) {
1117  case 0: score = FFMAX(score, v); break;
1118  case 1: score += FFABS(v); break;
1119  case 2: score += v * v; break;
1120  case 3: score64 += FFABS(v * v * (int64_t)v); break;
1121  case 4: score64 += v * v * (int64_t)(v * v); break;
1122  }
1123  }
1124  }
1125  }
1126 
1127  if (score)
1128  score64 = score;
1129 
1130  if (score64 < s->avctx->frame_skip_threshold)
1131  return 1;
1132  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1133  return 1;
1134  return 0;
1135 }
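/* Note: skip_check() compares the candidate frame with the last coded one using
 * dsp.frame_skip_cmp on 8x8 blocks and folds the per-block values into one score
 * whose shape depends on frame_skip_exp (max, sum of |v|, sum of v^2, ...); the
 * frame is skipped when the score stays below frame_skip_threshold or below a
 * lambda-scaled frame_skip_factor bound. */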
1136 
1137 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1138 {
1139  AVPacket pkt = { 0 };
1140  int ret, got_output;
1141 
1142  av_init_packet(&pkt);
1143  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1144  if (ret < 0)
1145  return ret;
1146 
1147  ret = pkt.size;
1148  av_free_packet(&pkt);
1149  return ret;
1150 }
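/* Note: encode_frame() is a helper for the B-frame estimation below: it pushes one
 * frame through a scratch encoder context and returns the resulting packet size in
 * bytes, which the caller weights by lambda2 as an approximate rate cost. */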
1151 
1152 static int estimate_best_b_count(MpegEncContext *s)
1153 {
1154  AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1155  AVCodecContext *c = avcodec_alloc_context3(NULL);
1156  const int scale = s->avctx->brd_scale;
1157  int i, j, out_size, p_lambda, b_lambda, lambda2;
1158  int64_t best_rd = INT64_MAX;
1159  int best_b_count = -1;
1160 
1161  av_assert0(scale >= 0 && scale <= 3);
1162 
1163  //emms_c();
1164  //s->next_picture_ptr->quality;
1165  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1166  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1167  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1168  if (!b_lambda) // FIXME we should do this somewhere else
1169  b_lambda = p_lambda;
1170  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1171  FF_LAMBDA_SHIFT;
1172 
1173  c->width = s->width >> scale;
1174  c->height = s->height >> scale;
1175  c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1176  CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1177  c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1178  c->mb_decision = s->avctx->mb_decision;
1179  c->me_cmp = s->avctx->me_cmp;
1180  c->mb_cmp = s->avctx->mb_cmp;
1181  c->me_sub_cmp = s->avctx->me_sub_cmp;
1182  c->pix_fmt = AV_PIX_FMT_YUV420P;
1183  c->time_base = s->avctx->time_base;
1184  c->max_b_frames = s->max_b_frames;
1185 
1186  if (avcodec_open2(c, codec, NULL) < 0)
1187  return -1;
1188 
1189  for (i = 0; i < s->max_b_frames + 2; i++) {
1190  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1191  s->next_picture_ptr;
1192 
1193  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1194  pre_input = *pre_input_ptr;
1195 
1196  if (!pre_input.shared && i) {
1197  pre_input.f.data[0] += INPLACE_OFFSET;
1198  pre_input.f.data[1] += INPLACE_OFFSET;
1199  pre_input.f.data[2] += INPLACE_OFFSET;
1200  }
1201 
1202  s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1203  pre_input.f.data[0], pre_input.f.linesize[0],
1204  c->width, c->height);
1205  s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1206  pre_input.f.data[1], pre_input.f.linesize[1],
1207  c->width >> 1, c->height >> 1);
1208  s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1209  pre_input.f.data[2], pre_input.f.linesize[2],
1210  c->width >> 1, c->height >> 1);
1211  }
1212  }
1213 
1214  for (j = 0; j < s->max_b_frames + 1; j++) {
1215  int64_t rd = 0;
1216 
1217  if (!s->input_picture[j])
1218  break;
1219 
1220  c->error[0] = c->error[1] = c->error[2] = 0;
1221 
1222  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1223  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1224 
1225  out_size = encode_frame(c, s->tmp_frames[0]);
1226 
1227  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1228 
1229  for (i = 0; i < s->max_b_frames + 1; i++) {
1230  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1231 
1232  s->tmp_frames[i + 1]->pict_type = is_p ?
1233  AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1234  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1235 
1236  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1237 
1238  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1239  }
1240 
1241  /* get the delayed frames */
1242  while (out_size) {
1243  out_size = encode_frame(c, NULL);
1244  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1245  }
1246 
1247  rd += c->error[0] + c->error[1] + c->error[2];
1248 
1249  if (rd < best_rd) {
1250  best_rd = rd;
1251  best_b_count = j;
1252  }
1253  }
1254 
1255  avcodec_close(c);
1256  av_freep(&c);
1257 
1258  return best_b_count;
1259 }
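/* Note: estimate_best_b_count() implements b_frame_strategy 2: the queued input
 * frames are downscaled by brd_scale, re-encoded with every candidate number of
 * B-frames between the anchors, and the count with the smallest estimated cost
 * (output bits weighted by lambda2 plus the scratch encoder's error[] sums) wins. */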
1260 
1261 static int select_input_picture(MpegEncContext *s)
1262 {
1263  int i, ret;
1264 
1265  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1266  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1267  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1268 
1269  /* set next picture type & ordering */
1270  if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1271  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1272  s->next_picture_ptr == NULL || s->intra_only) {
1273  s->reordered_input_picture[0] = s->input_picture[0];
1274  s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1275  s->reordered_input_picture[0]->f.coded_picture_number =
1276  s->coded_picture_number++;
1277  } else {
1278  int b_frames;
1279 
1280  if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1281  if (s->picture_in_gop_number < s->gop_size &&
1282  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1283  // FIXME check that the gop check above is +-1 correct
1284  av_frame_unref(&s->input_picture[0]->f);
1285 
1286  emms_c();
1287  ff_vbv_update(s, 0);
1288 
1289  goto no_output_pic;
1290  }
1291  }
1292 
1293  if (s->flags & CODEC_FLAG_PASS2) {
1294  for (i = 0; i < s->max_b_frames + 1; i++) {
1295  int pict_num = s->input_picture[0]->f.display_picture_number + i;
1296 
1297  if (pict_num >= s->rc_context.num_entries)
1298  break;
1299  if (!s->input_picture[i]) {
1300  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1301  break;
1302  }
1303 
1304  s->input_picture[i]->f.pict_type =
1305  s->rc_context.entry[pict_num].new_pict_type;
1306  }
1307  }
1308 
1309  if (s->avctx->b_frame_strategy == 0) {
1310  b_frames = s->max_b_frames;
1311  while (b_frames && !s->input_picture[b_frames])
1312  b_frames--;
1313  } else if (s->avctx->b_frame_strategy == 1) {
1314  for (i = 1; i < s->max_b_frames + 1; i++) {
1315  if (s->input_picture[i] &&
1316  s->input_picture[i]->b_frame_score == 0) {
1317  s->input_picture[i]->b_frame_score =
1318  get_intra_count(s,
1319  s->input_picture[i ]->f.data[0],
1320  s->input_picture[i - 1]->f.data[0],
1321  s->linesize) + 1;
1322  }
1323  }
1324  for (i = 0; i < s->max_b_frames + 1; i++) {
1325  if (s->input_picture[i] == NULL ||
1326  s->input_picture[i]->b_frame_score - 1 >
1327  s->mb_num / s->avctx->b_sensitivity)
1328  break;
1329  }
1330 
1331  b_frames = FFMAX(0, i - 1);
1332 
1333  /* reset scores */
1334  for (i = 0; i < b_frames + 1; i++) {
1335  s->input_picture[i]->b_frame_score = 0;
1336  }
1337  } else if (s->avctx->b_frame_strategy == 2) {
1338  b_frames = estimate_best_b_count(s);
1339  } else {
1340  av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1341  b_frames = 0;
1342  }
1343 
1344  emms_c();
1345 
1346  for (i = b_frames - 1; i >= 0; i--) {
1347  int type = s->input_picture[i]->f.pict_type;
1348  if (type && type != AV_PICTURE_TYPE_B)
1349  b_frames = i;
1350  }
1351  if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1352  b_frames == s->max_b_frames) {
1353  av_log(s->avctx, AV_LOG_ERROR,
1354  "warning, too many b frames in a row\n");
1355  }
1356 
1357  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1358  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1359  s->gop_size > s->picture_in_gop_number) {
1360  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1361  } else {
1362  if (s->flags & CODEC_FLAG_CLOSED_GOP)
1363  b_frames = 0;
1364  s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1365  }
1366  }
1367 
1368  if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1369  s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1370  b_frames--;
1371 
1372  s->reordered_input_picture[0] = s->input_picture[b_frames];
1373  if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1374  s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1375  s->reordered_input_picture[0]->f.coded_picture_number =
1376  s->coded_picture_number++;
1377  for (i = 0; i < b_frames; i++) {
1378  s->reordered_input_picture[i + 1] = s->input_picture[i];
1379  s->reordered_input_picture[i + 1]->f.pict_type =
1380  AV_PICTURE_TYPE_B;
1381  s->reordered_input_picture[i + 1]->f.coded_picture_number =
1382  s->coded_picture_number++;
1383  }
1384  }
1385  }
1386 no_output_pic:
1387  if (s->reordered_input_picture[0]) {
1388  s->reordered_input_picture[0]->reference =
1389  s->reordered_input_picture[0]->f.pict_type !=
1390  AV_PICTURE_TYPE_B ? 3 : 0;
1391 
1393  if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1394  return ret;
1395 
1396  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1397  // input is a shared pix, so we can't modify it -> alloc a new
1398  // one & ensure that the shared one is reusable
1399 
1400  Picture *pic;
1401  int i = ff_find_unused_picture(s, 0);
1402  if (i < 0)
1403  return i;
1404  pic = &s->picture[i];
1405 
1407  if (ff_alloc_picture(s, pic, 0) < 0) {
1408  return -1;
1409  }
1410 
1411  ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1412  if (ret < 0)
1413  return ret;
1414 
1415  /* mark us unused / free shared pic */
1416  av_frame_unref(&s->reordered_input_picture[0]->f);
1417  s->reordered_input_picture[0]->shared = 0;
1418 
1419  s->current_picture_ptr = pic;
1420  } else {
1421  // input is not a shared pix -> reuse buffer for current_pix
1422  s->current_picture_ptr = s->reordered_input_picture[0];
1423  for (i = 0; i < 4; i++) {
1424  s->new_picture.f.data[i] += INPLACE_OFFSET;
1425  }
1426  }
1428  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1429  s->current_picture_ptr)) < 0)
1430  return ret;
1431 
1432  s->picture_number = s->new_picture.f.display_picture_number;
1433  } else {
1435  }
1436  return 0;
1437 }
1438 
1439 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1440  AVFrame *pic_arg, int *got_packet)
1441 {
1442  MpegEncContext *s = avctx->priv_data;
1443  int i, stuffing_count, ret;
1444  int context_count = s->slice_context_count;
1445 
1446  s->picture_in_gop_number++;
1447 
1448  if (load_input_picture(s, pic_arg) < 0)
1449  return -1;
1450 
1451  if (select_input_picture(s) < 0) {
1452  return -1;
1453  }
1454 
1455  /* output? */
1456  if (s->new_picture.f.data[0]) {
1457  if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
1458  return ret;
1459  if (s->mb_info) {
1460  s->mb_info_ptr = av_packet_new_side_data(pkt,
1461  AV_PKT_DATA_H263_MB_INFO,
1462  s->mb_width*s->mb_height*12);
1463  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1464  }
1465 
1466  for (i = 0; i < context_count; i++) {
1467  int start_y = s->thread_context[i]->start_mb_y;
1468  int end_y = s->thread_context[i]-> end_mb_y;
1469  int h = s->mb_height;
1470  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1471  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1472 
1473  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1474  }
1475 
1476  s->pict_type = s->new_picture.f.pict_type;
1477  //emms_c();
1478  if (ff_MPV_frame_start(s, avctx) < 0)
1479  return -1;
1480 vbv_retry:
1481  if (encode_picture(s, s->picture_number) < 0)
1482  return -1;
1483 
1484  avctx->header_bits = s->header_bits;
1485  avctx->mv_bits = s->mv_bits;
1486  avctx->misc_bits = s->misc_bits;
1487  avctx->i_tex_bits = s->i_tex_bits;
1488  avctx->p_tex_bits = s->p_tex_bits;
1489  avctx->i_count = s->i_count;
1490  // FIXME f/b_count in avctx
1491  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1492  avctx->skip_count = s->skip_count;
1493 
1494  ff_MPV_frame_end(s);
1495 
1496  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1497  ff_mjpeg_encode_picture_trailer(s);
1498 
1499  if (avctx->rc_buffer_size) {
1500  RateControlContext *rcc = &s->rc_context;
1501  int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1502 
1503  if (put_bits_count(&s->pb) > max_size &&
1504  s->lambda < s->avctx->lmax) {
1505  s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1506  (s->qscale + 1) / s->qscale);
1507  if (s->adaptive_quant) {
1508  int i;
1509  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1510  s->lambda_table[i] =
1511  FFMAX(s->lambda_table[i] + 1,
1512  s->lambda_table[i] * (s->qscale + 1) /
1513  s->qscale);
1514  }
1515  s->mb_skipped = 0; // done in MPV_frame_start()
1516  // done in encode_picture() so we must undo it
1517  if (s->pict_type == AV_PICTURE_TYPE_P) {
1518  if (s->flipflop_rounding ||
1519  s->codec_id == AV_CODEC_ID_H263P ||
1520  s->codec_id == AV_CODEC_ID_MPEG4)
1521  s->no_rounding ^= 1;
1522  }
1523  if (s->pict_type != AV_PICTURE_TYPE_B) {
1524  s->time_base = s->last_time_base;
1525  s->last_non_b_time = s->time - s->pp_time;
1526  }
1527  for (i = 0; i < context_count; i++) {
1528  PutBitContext *pb = &s->thread_context[i]->pb;
1529  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1530  }
1531  goto vbv_retry;
1532  }
1533 
1534  assert(s->avctx->rc_max_rate);
1535  }
1536 
1537  if (s->flags & CODEC_FLAG_PASS1)
1538  ff_write_pass1_stats(s);
1539 
1540  for (i = 0; i < 4; i++) {
1541  s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1542  avctx->error[i] += s->current_picture_ptr->f.error[i];
1543  }
1544 
1545  if (s->flags & CODEC_FLAG_PASS1)
1546  assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1547  avctx->i_tex_bits + avctx->p_tex_bits ==
1548  put_bits_count(&s->pb));
1549  flush_put_bits(&s->pb);
1550  s->frame_bits = put_bits_count(&s->pb);
1551 
1552  stuffing_count = ff_vbv_update(s, s->frame_bits);
1553  s->stuffing_bits = 8*stuffing_count;
1554  if (stuffing_count) {
1555  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1556  stuffing_count + 50) {
1557  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1558  return -1;
1559  }
1560 
1561  switch (s->codec_id) {
1562  case AV_CODEC_ID_MPEG1VIDEO:
1563  case AV_CODEC_ID_MPEG2VIDEO:
1564  while (stuffing_count--) {
1565  put_bits(&s->pb, 8, 0);
1566  }
1567  break;
1568  case AV_CODEC_ID_MPEG4:
1569  put_bits(&s->pb, 16, 0);
1570  put_bits(&s->pb, 16, 0x1C3);
1571  stuffing_count -= 4;
1572  while (stuffing_count--) {
1573  put_bits(&s->pb, 8, 0xFF);
1574  }
1575  break;
1576  default:
1577  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1578  }
1579  flush_put_bits(&s->pb);
1580  s->frame_bits = put_bits_count(&s->pb);
1581  }
1582 
1583  /* update mpeg1/2 vbv_delay for CBR */
1584  if (s->avctx->rc_max_rate &&
1585  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1586  s->out_format == FMT_MPEG1 &&
1587  90000LL * (avctx->rc_buffer_size - 1) <=
1588  s->avctx->rc_max_rate * 0xFFFFLL) {
1589  int vbv_delay, min_delay;
1590  double inbits = s->avctx->rc_max_rate *
1591  av_q2d(s->avctx->time_base);
1592  int minbits = s->frame_bits - 8 *
1593  (s->vbv_delay_ptr - s->pb.buf - 1);
1594  double bits = s->rc_context.buffer_index + minbits - inbits;
1595 
1596  if (bits < 0)
1597  av_log(s->avctx, AV_LOG_ERROR,
1598  "Internal error, negative bits\n");
1599 
1600  assert(s->repeat_first_field == 0);
1601 
1602  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1603  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1604  s->avctx->rc_max_rate;
1605 
1606  vbv_delay = FFMAX(vbv_delay, min_delay);
1607 
1608  av_assert0(vbv_delay < 0xFFFF);
1609 
1610  s->vbv_delay_ptr[0] &= 0xF8;
1611  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1612  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1613  s->vbv_delay_ptr[2] &= 0x07;
1614  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1615  avctx->vbv_delay = vbv_delay * 300;
1616  }
1617  s->total_bits += s->frame_bits;
1618  avctx->frame_bits = s->frame_bits;
1619 
1620  pkt->pts = s->current_picture.f.pts;
1621  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1622  if (!s->current_picture.f.coded_picture_number)
1623  pkt->dts = pkt->pts - s->dts_delta;
1624  else
1625  pkt->dts = s->reordered_pts;
1626  s->reordered_pts = pkt->pts;
1627  } else
1628  pkt->dts = pkt->pts;
1629  if (s->current_picture.f.key_frame)
1630  pkt->flags |= AV_PKT_FLAG_KEY;
1631  if (s->mb_info)
1632  av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1633  } else {
1634  s->frame_bits = 0;
1635  }
1636 
1637  /* release non-reference frames */
1638  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1639  if (!s->picture[i].reference)
1640  ff_mpeg_unref_picture(s, &s->picture[i]);
1641  }
1642 
1643  assert((s->frame_bits & 7) == 0);
1644 
1645  pkt->size = s->frame_bits / 8;
1646  *got_packet = !!pkt->size;
1647  return 0;
1648 }
1649 
1650 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1651  int n, int threshold)
1652 {
1653  static const char tab[64] = {
1654  3, 2, 2, 1, 1, 1, 1, 1,
1655  1, 1, 1, 1, 1, 1, 1, 1,
1656  1, 1, 1, 1, 1, 1, 1, 1,
1657  0, 0, 0, 0, 0, 0, 0, 0,
1658  0, 0, 0, 0, 0, 0, 0, 0,
1659  0, 0, 0, 0, 0, 0, 0, 0,
1660  0, 0, 0, 0, 0, 0, 0, 0,
1661  0, 0, 0, 0, 0, 0, 0, 0
1662  };
1663  int score = 0;
1664  int run = 0;
1665  int i;
1666  int16_t *block = s->block[n];
1667  const int last_index = s->block_last_index[n];
1668  int skip_dc;
1669 
1670  if (threshold < 0) {
1671  skip_dc = 0;
1672  threshold = -threshold;
1673  } else
1674  skip_dc = 1;
1675 
1676  /* Are all we could set to zero already zero? */
1677  if (last_index <= skip_dc - 1)
1678  return;
1679 
1680  for (i = 0; i <= last_index; i++) {
1681  const int j = s->intra_scantable.permutated[i];
1682  const int level = FFABS(block[j]);
1683  if (level == 1) {
1684  if (skip_dc && i == 0)
1685  continue;
1686  score += tab[run];
1687  run = 0;
1688  } else if (level > 1) {
1689  return;
1690  } else {
1691  run++;
1692  }
1693  }
1694  if (score >= threshold)
1695  return;
1696  for (i = skip_dc; i <= last_index; i++) {
1697  const int j = s->intra_scantable.permutated[i];
1698  block[j] = 0;
1699  }
1700  if (block[0])
1701  s->block_last_index[n] = 0;
1702  else
1703  s->block_last_index[n] = -1;
1704 }
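/* Note: dct_single_coeff_elimination() drops blocks that contain only a few
 * scattered +-1 coefficients: each coefficient's preceding zero-run is mapped
 * through tab[] to a cost, and when the total stays below the luma or chroma
 * elimination threshold the whole block (except possibly the DC) is zeroed, since
 * coding it would cost more bits than the quality it adds. */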
1705 
1706 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1707  int last_index)
1708 {
1709  int i;
1710  const int maxlevel = s->max_qcoeff;
1711  const int minlevel = s->min_qcoeff;
1712  int overflow = 0;
1713 
1714  if (s->mb_intra) {
1715  i = 1; // skip clipping of intra dc
1716  } else
1717  i = 0;
1718 
1719  for (; i <= last_index; i++) {
1720  const int j = s->intra_scantable.permutated[i];
1721  int level = block[j];
1722 
1723  if (level > maxlevel) {
1724  level = maxlevel;
1725  overflow++;
1726  } else if (level < minlevel) {
1727  level = minlevel;
1728  overflow++;
1729  }
1730 
1731  block[j] = level;
1732  }
1733 
1734  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1735  av_log(s->avctx, AV_LOG_INFO,
1736  "warning, clipping %d dct coefficients to %d..%d\n",
1737  overflow, minlevel, maxlevel);
1738 }
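/* Note: clip_coeffs() clamps quantized levels into [min_qcoeff, max_qcoeff] so the
 * entropy coder's level range is never exceeded; the warning is only printed when
 * mb_decision is FF_MB_DECISION_SIMPLE. */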
1739 
1740 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1741 {
1742  int x, y;
1743  // FIXME optimize
1744  for (y = 0; y < 8; y++) {
1745  for (x = 0; x < 8; x++) {
1746  int x2, y2;
1747  int sum = 0;
1748  int sqr = 0;
1749  int count = 0;
1750 
1751  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1752  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1753  int v = ptr[x2 + y2 * stride];
1754  sum += v;
1755  sqr += v * v;
1756  count++;
1757  }
1758  }
1759  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1760  }
1761  }
1762 }
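/* Note: get_visual_weight() derives a per-pixel weight from the variance of the
 * surrounding (up to 3x3) neighbourhood: 36 * sqrt(count * sqr - sum * sum) / count
 * is proportional to the local standard deviation, so the noise-shaping quantizer
 * can place more error in busy areas than in flat ones. */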
1763 
1764 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1765  int motion_x, int motion_y,
1766  int mb_block_height,
1767  int mb_block_width,
1768  int mb_block_count)
1769 {
1770  int16_t weight[12][64];
1771  int16_t orig[12][64];
1772  const int mb_x = s->mb_x;
1773  const int mb_y = s->mb_y;
1774  int i;
1775  int skip_dct[12];
1776  int dct_offset = s->linesize * 8; // default for progressive frames
1777  int uv_dct_offset = s->uvlinesize * 8;
1778  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1779  ptrdiff_t wrap_y, wrap_c;
1780 
1781  for (i = 0; i < mb_block_count; i++)
1782  skip_dct[i] = s->skipdct;
1783 
1784  if (s->adaptive_quant) {
1785  const int last_qp = s->qscale;
1786  const int mb_xy = mb_x + mb_y * s->mb_stride;
1787 
1788  s->lambda = s->lambda_table[mb_xy];
1789  update_qscale(s);
1790 
1791  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1792  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1793  s->dquant = s->qscale - last_qp;
1794 
1795  if (s->out_format == FMT_H263) {
1796  s->dquant = av_clip(s->dquant, -2, 2);
1797 
1798  if (s->codec_id == AV_CODEC_ID_MPEG4) {
1799  if (!s->mb_intra) {
1800  if (s->pict_type == AV_PICTURE_TYPE_B) {
1801  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1802  s->dquant = 0;
1803  }
1804  if (s->mv_type == MV_TYPE_8X8)
1805  s->dquant = 0;
1806  }
1807  }
1808  }
1809  }
1810  ff_set_qscale(s, last_qp + s->dquant);
1811  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1812  ff_set_qscale(s, s->qscale + s->dquant);
1813 
1814  wrap_y = s->linesize;
1815  wrap_c = s->uvlinesize;
1816  ptr_y = s->new_picture.f.data[0] +
1817  (mb_y * 16 * wrap_y) + mb_x * 16;
1818  ptr_cb = s->new_picture.f.data[1] +
1819  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1820  ptr_cr = s->new_picture.f.data[2] +
1821  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1822 
1823  if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
1824  uint8_t *ebuf = s->edge_emu_buffer + 32;
1825  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
1826  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
1827  s->vdsp.emulated_edge_mc(ebuf, wrap_y, ptr_y, wrap_y, 16, 16, mb_x * 16,
1828  mb_y * 16, s->width, s->height);
1829  ptr_y = ebuf;
1830  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, wrap_c, ptr_cb, wrap_c, mb_block_width,
1831  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1832  cw, ch);
1833  ptr_cb = ebuf + 18 * wrap_y;
1834  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, wrap_c, ptr_cr, wrap_c, mb_block_width,
1835  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1836  cw, ch);
1837  ptr_cr = ebuf + 18 * wrap_y + 16;
1838  }
1839 
1840  if (s->mb_intra) {
1841  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1842  int progressive_score, interlaced_score;
1843 
1844  s->interlaced_dct = 0;
1845  progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1846  NULL, wrap_y, 8) +
1847  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1848  NULL, wrap_y, 8) - 400;
1849 
1850  if (progressive_score > 0) {
1851  interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1852  NULL, wrap_y * 2, 8) +
1853  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1854  NULL, wrap_y * 2, 8);
1855  if (progressive_score > interlaced_score) {
1856  s->interlaced_dct = 1;
1857 
1858  dct_offset = wrap_y;
1859  uv_dct_offset = wrap_c;
1860  wrap_y <<= 1;
1861  if (s->chroma_format == CHROMA_422 ||
1862  s->chroma_format == CHROMA_444)
1863  wrap_c <<= 1;
1864  }
1865  }
1866  }
1867 
1868  s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1869  s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1870  s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1871  s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1872 
1873  if (s->flags & CODEC_FLAG_GRAY) {
1874  skip_dct[4] = 1;
1875  skip_dct[5] = 1;
1876  } else {
1877  s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1878  s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1879  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
1880  s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
1881  s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
1882  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
1883  s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
1884  s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
1885  s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
1886  s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
1887  s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
1888  s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
1889  }
1890  }
1891  } else {
1892  op_pixels_func (*op_pix)[4];
1893  qpel_mc_func (*op_qpix)[16];
1894  uint8_t *dest_y, *dest_cb, *dest_cr;
1895 
1896  dest_y = s->dest[0];
1897  dest_cb = s->dest[1];
1898  dest_cr = s->dest[2];
1899 
1900  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1901  op_pix = s->hdsp.put_pixels_tab;
1902  op_qpix = s->dsp.put_qpel_pixels_tab;
1903  } else {
1904  op_pix = s->hdsp.put_no_rnd_pixels_tab;
1905  op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1906  }
1907 
1908  if (s->mv_dir & MV_DIR_FORWARD) {
1909  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1910  s->last_picture.f.data,
1911  op_pix, op_qpix);
1912  op_pix = s->hdsp.avg_pixels_tab;
1913  op_qpix = s->dsp.avg_qpel_pixels_tab;
1914  }
1915  if (s->mv_dir & MV_DIR_BACKWARD) {
1916  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1917  s->next_picture.f.data,
1918  op_pix, op_qpix);
1919  }
1920 
1921  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1922  int progressive_score, interlaced_score;
1923 
1924  s->interlaced_dct = 0;
1925  progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1926  ptr_y, wrap_y,
1927  8) +
1928  s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1929  ptr_y + wrap_y * 8, wrap_y,
1930  8) - 400;
1931 
1932  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1933  progressive_score -= 400;
1934 
1935  if (progressive_score > 0) {
1936  interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1937  ptr_y,
1938  wrap_y * 2, 8) +
1939  s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1940  ptr_y + wrap_y,
1941  wrap_y * 2, 8);
1942 
1943  if (progressive_score > interlaced_score) {
1944  s->interlaced_dct = 1;
1945 
1946  dct_offset = wrap_y;
1947  uv_dct_offset = wrap_c;
1948  wrap_y <<= 1;
1949  if (s->chroma_format == CHROMA_422)
1950  wrap_c <<= 1;
1951  }
1952  }
1953  }
1954 
1955  s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1956  s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1957  s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1958  dest_y + dct_offset, wrap_y);
1959  s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1960  dest_y + dct_offset + 8, wrap_y);
1961 
1962  if (s->flags & CODEC_FLAG_GRAY) {
1963  skip_dct[4] = 1;
1964  skip_dct[5] = 1;
1965  } else {
1966  s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1967  s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1968  if (!s->chroma_y_shift) { /* 422 */
1969  s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
1970  dest_cb + uv_dct_offset, wrap_c);
1971  s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
1972  dest_cr + uv_dct_offset, wrap_c);
1973  }
1974  }
1975  /* pre quantization */
1976  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1977  2 * s->qscale * s->qscale) {
1978  // FIXME optimize
1979  if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1980  wrap_y, 8) < 20 * s->qscale)
1981  skip_dct[0] = 1;
1982  if (s->dsp.sad[1](NULL, ptr_y + 8,
1983  dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1984  skip_dct[1] = 1;
1985  if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1986  dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1987  skip_dct[2] = 1;
1988  if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1989  dest_y + dct_offset + 8,
1990  wrap_y, 8) < 20 * s->qscale)
1991  skip_dct[3] = 1;
1992  if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1993  wrap_c, 8) < 20 * s->qscale)
1994  skip_dct[4] = 1;
1995  if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1996  wrap_c, 8) < 20 * s->qscale)
1997  skip_dct[5] = 1;
1998  if (!s->chroma_y_shift) { /* 422 */
1999  if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
2000  dest_cb + uv_dct_offset,
2001  wrap_c, 8) < 20 * s->qscale)
2002  skip_dct[6] = 1;
2003  if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
2004  dest_cr + uv_dct_offset,
2005  wrap_c, 8) < 20 * s->qscale)
2006  skip_dct[7] = 1;
2007  }
2008  }
2009  }
2010 
2011  if (s->quantizer_noise_shaping) {
2012  if (!skip_dct[0])
2013  get_visual_weight(weight[0], ptr_y , wrap_y);
2014  if (!skip_dct[1])
2015  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2016  if (!skip_dct[2])
2017  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2018  if (!skip_dct[3])
2019  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2020  if (!skip_dct[4])
2021  get_visual_weight(weight[4], ptr_cb , wrap_c);
2022  if (!skip_dct[5])
2023  get_visual_weight(weight[5], ptr_cr , wrap_c);
2024  if (!s->chroma_y_shift) { /* 422 */
2025  if (!skip_dct[6])
2026  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2027  wrap_c);
2028  if (!skip_dct[7])
2029  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2030  wrap_c);
2031  }
2032  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2033  }
2034 
2035  /* DCT & quantize */
2036  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2037  {
2038  for (i = 0; i < mb_block_count; i++) {
2039  if (!skip_dct[i]) {
2040  int overflow;
2041  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2042  // FIXME we could decide to change the quantizer instead of
2043  // clipping
2044  // JS: I don't think that would be a good idea; it could lower
2045  // quality instead of improving it. Only INTRADC clipping
2046  // deserves changes in the quantizer.
2047  if (overflow)
2048  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2049  } else
2050  s->block_last_index[i] = -1;
2051  }
2052  if (s->quantizer_noise_shaping) {
2053  for (i = 0; i < mb_block_count; i++) {
2054  if (!skip_dct[i]) {
2055  s->block_last_index[i] =
2056  dct_quantize_refine(s, s->block[i], weight[i],
2057  orig[i], i, s->qscale);
2058  }
2059  }
2060  }
2061 
2062  if (s->luma_elim_threshold && !s->mb_intra)
2063  for (i = 0; i < 4; i++)
2064  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2065  if (s->chroma_elim_threshold && !s->mb_intra)
2066  for (i = 4; i < mb_block_count; i++)
2067  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2068 
2069  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2070  for (i = 0; i < mb_block_count; i++) {
2071  if (s->block_last_index[i] == -1)
2072  s->coded_score[i] = INT_MAX / 256;
2073  }
2074  }
2075  }
2076 
2077  if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2078  s->block_last_index[4] =
2079  s->block_last_index[5] = 0;
2080  s->block[4][0] =
2081  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2082  if (!s->chroma_y_shift) { /* 422 / 444 */
2083  for (i=6; i<12; i++) {
2084  s->block_last_index[i] = 0;
2085  s->block[i][0] = s->block[4][0];
2086  }
2087  }
2088  }
2089 
2090  // the non-C quantize code returns an incorrect block_last_index, FIXME
2091  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2092  for (i = 0; i < mb_block_count; i++) {
2093  int j;
2094  if (s->block_last_index[i] > 0) {
2095  for (j = 63; j > 0; j--) {
2096  if (s->block[i][s->intra_scantable.permutated[j]])
2097  break;
2098  }
2099  s->block_last_index[i] = j;
2100  }
2101  }
2102  }
2103 
2104  /* huffman encode */
2105  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2106  case AV_CODEC_ID_MPEG1VIDEO:
2107  case AV_CODEC_ID_MPEG2VIDEO:
2108  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2109  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2110  break;
2111  case AV_CODEC_ID_MPEG4:
2112  if (CONFIG_MPEG4_ENCODER)
2113  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2114  break;
2115  case AV_CODEC_ID_MSMPEG4V2:
2116  case AV_CODEC_ID_MSMPEG4V3:
2117  case AV_CODEC_ID_WMV1:
2118  if (CONFIG_MSMPEG4_ENCODER)
2119  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2120  break;
2121  case AV_CODEC_ID_WMV2:
2122  if (CONFIG_WMV2_ENCODER)
2123  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2124  break;
2125  case AV_CODEC_ID_H261:
2126  if (CONFIG_H261_ENCODER)
2127  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2128  break;
2129  case AV_CODEC_ID_H263:
2130  case AV_CODEC_ID_H263P:
2131  case AV_CODEC_ID_FLV1:
2132  case AV_CODEC_ID_RV10:
2133  case AV_CODEC_ID_RV20:
2134  if (CONFIG_H263_ENCODER)
2135  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2136  break;
2137  case AV_CODEC_ID_MJPEG:
2138  case AV_CODEC_ID_AMV:
2139  if (CONFIG_MJPEG_ENCODER)
2140  ff_mjpeg_encode_mb(s, s->block);
2141  break;
2142  default:
2143  av_assert1(0);
2144  }
2145 }
2146 
2147 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2148 {
2149  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2150  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2151  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2152 }
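The dispatch above only decides how many 8x8 blocks one macroblock carries for the current chroma subsampling. A standalone sketch of that mapping (illustrative only, not part of this file; it uses the horizontal/vertical chroma shift convention, e.g. 1,1 for 4:2:0):

/* Illustrative only (not part of mpegvideo_enc.c): number of 8x8 blocks per
 * macroblock as a function of the chroma subsampling shifts.
 * 4:2:0 -> 4 luma + 2 chroma = 6, 4:2:2 -> 4 + 4 = 8, 4:4:4 -> 4 + 8 = 12. */
#include <stdio.h>

static int blocks_per_mb(int chroma_x_shift, int chroma_y_shift)
{
    int luma   = 4;                                                 /* 16x16 luma = four 8x8 blocks */
    int chroma = 2 * (2 >> chroma_x_shift) * (2 >> chroma_y_shift); /* Cb + Cr planes               */
    return luma + chroma;
}

int main(void)
{
    printf("4:2:0 -> %d blocks\n", blocks_per_mb(1, 1)); /* 6  */
    printf("4:2:2 -> %d blocks\n", blocks_per_mb(1, 0)); /* 8  */
    printf("4:4:4 -> %d blocks\n", blocks_per_mb(0, 0)); /* 12 */
    return 0;
}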
2153 
2154 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2155  int i;
2156 
2157  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2158 
2159  /* mpeg1 */
2160  d->mb_skip_run= s->mb_skip_run;
2161  for(i=0; i<3; i++)
2162  d->last_dc[i] = s->last_dc[i];
2163 
2164  /* statistics */
2165  d->mv_bits= s->mv_bits;
2166  d->i_tex_bits= s->i_tex_bits;
2167  d->p_tex_bits= s->p_tex_bits;
2168  d->i_count= s->i_count;
2169  d->f_count= s->f_count;
2170  d->b_count= s->b_count;
2171  d->skip_count= s->skip_count;
2172  d->misc_bits= s->misc_bits;
2173  d->last_bits= 0;
2174 
2175  d->mb_skipped= 0;
2176  d->qscale= s->qscale;
2177  d->dquant= s->dquant;
2178 
2179  d->esc3_level_length= s->esc3_level_length;
2180 }
2181 
2182 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2183  int i;
2184 
2185  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2186  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2187 
2188  /* mpeg1 */
2189  d->mb_skip_run= s->mb_skip_run;
2190  for(i=0; i<3; i++)
2191  d->last_dc[i] = s->last_dc[i];
2192 
2193  /* statistics */
2194  d->mv_bits= s->mv_bits;
2195  d->i_tex_bits= s->i_tex_bits;
2196  d->p_tex_bits= s->p_tex_bits;
2197  d->i_count= s->i_count;
2198  d->f_count= s->f_count;
2199  d->b_count= s->b_count;
2200  d->skip_count= s->skip_count;
2201  d->misc_bits= s->misc_bits;
2202 
2203  d->mb_intra= s->mb_intra;
2204  d->mb_skipped= s->mb_skipped;
2205  d->mv_type= s->mv_type;
2206  d->mv_dir= s->mv_dir;
2207  d->pb= s->pb;
2208  if(s->data_partitioning){
2209  d->pb2= s->pb2;
2210  d->tex_pb= s->tex_pb;
2211  }
2212  d->block= s->block;
2213  for(i=0; i<8; i++)
2214  d->block_last_index[i]= s->block_last_index[i];
2215  d->interlaced_dct= s->interlaced_dct;
2216  d->qscale= s->qscale;
2217 
2218  d->esc3_level_length= s->esc3_level_length;
2219 }
2220 
2221 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2222  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2223  int *dmin, int *next_block, int motion_x, int motion_y)
2224 {
2225  int score;
2226  uint8_t *dest_backup[3];
2227 
2228  copy_context_before_encode(s, backup, type);
2229 
2230  s->block= s->blocks[*next_block];
2231  s->pb= pb[*next_block];
2232  if(s->data_partitioning){
2233  s->pb2 = pb2 [*next_block];
2234  s->tex_pb= tex_pb[*next_block];
2235  }
2236 
2237  if(*next_block){
2238  memcpy(dest_backup, s->dest, sizeof(s->dest));
2239  s->dest[0] = s->rd_scratchpad;
2240  s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2241  s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2242  assert(s->linesize >= 32); //FIXME
2243  }
2244 
2245  encode_mb(s, motion_x, motion_y);
2246 
2247  score= put_bits_count(&s->pb);
2248  if(s->data_partitioning){
2249  score+= put_bits_count(&s->pb2);
2250  score+= put_bits_count(&s->tex_pb);
2251  }
2252 
2253  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2254  ff_MPV_decode_mb(s, s->block);
2255 
2256  score *= s->lambda2;
2257  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2258  }
2259 
2260  if(*next_block){
2261  memcpy(s->dest, dest_backup, sizeof(s->dest));
2262  }
2263 
2264  if(score<*dmin){
2265  *dmin= score;
2266  *next_block^=1;
2267 
2268  copy_context_after_encode(best, s, type);
2269  }
2270 }
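For FF_MB_DECISION_RD the score above is an integer-scaled rate-distortion cost: rate in bits from put_bits_count(), distortion from sse_mb(). A minimal sketch of the same ordering, J = D + lambda*R with lambda = lambda2 / 2^FF_LAMBDA_SHIFT, kept in integer arithmetic (illustrative, not part of this file):

/* Illustrative only: same ordering as the in-loop computation
 *   score = put_bits_count(...); score *= lambda2; score += sse << FF_LAMBDA_SHIFT; */
#include <stdint.h>

static int64_t rd_score(int64_t bits, int64_t sse, int lambda2, int lambda_shift)
{
    return bits * lambda2 + (sse << lambda_shift);
}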
2271 
2272 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2273  uint32_t *sq = ff_squareTbl + 256;
2274  int acc=0;
2275  int x,y;
2276 
2277  if(w==16 && h==16)
2278  return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2279  else if(w==8 && h==8)
2280  return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2281 
2282  for(y=0; y<h; y++){
2283  for(x=0; x<w; x++){
2284  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2285  }
2286  }
2287 
2288  av_assert2(acc>=0);
2289 
2290  return acc;
2291 }
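The generic fallback loop above is a plain sum of squared differences; the ff_squareTbl lookup only replaces the multiply. A self-contained equivalent for reference (illustrative only):

/* Illustrative only: what the fallback loop computes for a w x h block. */
#include <stdint.h>

static int sse_ref(const uint8_t *src1, const uint8_t *src2,
                   int w, int h, int stride)
{
    int acc = 0;
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
            int d = src1[x + y * stride] - src2[x + y * stride];
            acc += d * d;   /* same value the sq[] table lookup yields */
        }
    return acc;
}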
2292 
2293 static int sse_mb(MpegEncContext *s){
2294  int w= 16;
2295  int h= 16;
2296 
2297  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2298  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2299 
2300  if(w==16 && h==16)
2301  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2302  return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2303  +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2304  +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2305  }else{
2306  return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2307  +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2308  +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2309  }
2310  else
2311  return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2312  +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2313  +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2314 }
2315 
2316 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2317  MpegEncContext *s= *(void**)arg;
2318 
2319 
2320  s->me.pre_pass=1;
2321  s->me.dia_size= s->avctx->pre_dia_size;
2322  s->first_slice_line=1;
2323  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2324  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2325  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2326  }
2327  s->first_slice_line=0;
2328  }
2329 
2330  s->me.pre_pass=0;
2331 
2332  return 0;
2333 }
2334 
2335 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2336  MpegEncContext *s= *(void**)arg;
2337 
2338  ff_check_alignment();
2339 
2340  s->me.dia_size= s->avctx->dia_size;
2341  s->first_slice_line=1;
2342  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2343  s->mb_x=0; //for block init below
2344  ff_init_block_index(s);
2345  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2346  s->block_index[0]+=2;
2347  s->block_index[1]+=2;
2348  s->block_index[2]+=2;
2349  s->block_index[3]+=2;
2350 
2351  /* compute motion vector & mb_type and store in context */
2352  if(s->pict_type==AV_PICTURE_TYPE_B)
2353  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2354  else
2355  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2356  }
2357  s->first_slice_line=0;
2358  }
2359  return 0;
2360 }
2361 
2362 static int mb_var_thread(AVCodecContext *c, void *arg){
2363  MpegEncContext *s= *(void**)arg;
2364  int mb_x, mb_y;
2365 
2366  ff_check_alignment();
2367 
2368  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2369  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2370  int xx = mb_x * 16;
2371  int yy = mb_y * 16;
2372  uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2373  int varc;
2374  int sum = s->dsp.pix_sum(pix, s->linesize);
2375 
2376  varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2377 
2378  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2379  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2380  s->me.mb_var_sum_temp += varc;
2381  }
2382  }
2383  return 0;
2384 }
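The varc line above is the usual variance identity in fixed point: with S1 the sum of the 256 luma samples (dsp.pix_sum) and S2 the sum of their squares (dsp.pix_norm1), 256*variance = S2 - S1*S1/256, the >>8 rescales to a per-block value, and +500/+128 are small bias/rounding constants; mb_mean is simply S1/256, rounded. A standalone sketch of the same computation (illustrative only):

/* Illustrative only: unoptimized variance of one 16x16 luma block. */
#include <stdint.h>

static int block_variance(const uint8_t *pix, int linesize)
{
    unsigned s1 = 0, s2 = 0;                 /* sum and sum of squares */
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++) {
            unsigned v = pix[x + y * linesize];
            s1 += v;
            s2 += v * v;
        }
    return (int)((s2 - ((uint64_t)s1 * s1 >> 8) + 500 + 128) >> 8);
}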
2385 
2386 static void write_slice_end(MpegEncContext *s){
2387  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2388  if(s->partitioned_frame){
2389  ff_mpeg4_merge_partitions(s);
2390  }
2391 
2392  ff_mpeg4_stuffing(&s->pb);
2393  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2394  ff_mjpeg_encode_stuffing(&s->pb);
2395  }
2396 
2397  avpriv_align_put_bits(&s->pb);
2398  flush_put_bits(&s->pb);
2399 
2400  if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2401  s->misc_bits+= get_bits_diff(s);
2402 }
2403 
2404 static void write_mb_info(MpegEncContext *s)
2405 {
2406  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2407  int offset = put_bits_count(&s->pb);
2408  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2409  int gobn = s->mb_y / s->gob_index;
2410  int pred_x, pred_y;
2411  if (CONFIG_H263_ENCODER)
2412  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2413  bytestream_put_le32(&ptr, offset);
2414  bytestream_put_byte(&ptr, s->qscale);
2415  bytestream_put_byte(&ptr, gobn);
2416  bytestream_put_le16(&ptr, mba);
2417  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2418  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2419  /* 4MV not implemented */
2420  bytestream_put_byte(&ptr, 0); /* hmv2 */
2421  bytestream_put_byte(&ptr, 0); /* vmv2 */
2422 }
2423 
2424 static void update_mb_info(MpegEncContext *s, int startcode)
2425 {
2426  if (!s->mb_info)
2427  return;
2428  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2429  s->mb_info_size += 12;
2430  s->prev_mb_info = s->last_mb_info;
2431  }
2432  if (startcode) {
2433  s->prev_mb_info = put_bits_count(&s->pb)/8;
2434  /* This might have incremented mb_info_size above, and we return without
2435  * actually writing any info into that slot yet. But in that case this
2436  * will be called again after the start code has been written, and the
2437  * mb info will be written then. */
2438  return;
2439  }
2440 
2441  s->last_mb_info = put_bits_count(&s->pb)/8;
2442  if (!s->mb_info_size)
2443  s->mb_info_size += 12;
2444  write_mb_info(s);
2445 }
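write_mb_info() above emits one fixed 12-byte record per mb_info interval: the bit offset of the macroblock, qscale, GOB number, macroblock address, and the four motion-vector predictor bytes. A hypothetical reader-side sketch, only to document that layout (this struct and function are not FFmpeg API):

/* Hypothetical reader for the 12-byte records written above; the struct and
 * function names are illustrative, not FFmpeg API. */
#include <stdint.h>

struct mb_info_record {
    uint32_t bit_offset;   /* put_bits_count() at the macroblock, little-endian */
    uint8_t  qscale;
    uint8_t  gobn;         /* GOB number                                        */
    uint16_t mba;          /* macroblock address within the GOB, little-endian  */
    int8_t   hmv1, vmv1;   /* motion vector predictor                           */
    int8_t   hmv2, vmv2;   /* always 0 here: 4MV not implemented                */
};

static struct mb_info_record parse_mb_info(const uint8_t *p)
{
    struct mb_info_record r;
    r.bit_offset = p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    r.qscale     = p[4];
    r.gobn       = p[5];
    r.mba        = (uint16_t)(p[6] | (p[7] << 8));
    r.hmv1       = (int8_t)p[8];
    r.vmv1       = (int8_t)p[9];
    r.hmv2       = (int8_t)p[10];
    r.vmv2       = (int8_t)p[11];
    return r;
}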
2446 
2447 static int encode_thread(AVCodecContext *c, void *arg){
2448  MpegEncContext *s= *(void**)arg;
2449  int mb_x, mb_y, pdif = 0;
2450  int chr_h= 16>>s->chroma_y_shift;
2451  int i, j;
2452  MpegEncContext best_s, backup_s;
2453  uint8_t bit_buf[2][MAX_MB_BYTES];
2454  uint8_t bit_buf2[2][MAX_MB_BYTES];
2455  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2456  PutBitContext pb[2], pb2[2], tex_pb[2];
2457 
2458  ff_check_alignment();
2459 
2460  for(i=0; i<2; i++){
2461  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2462  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2463  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2464  }
2465 
2466  s->last_bits= put_bits_count(&s->pb);
2467  s->mv_bits=0;
2468  s->misc_bits=0;
2469  s->i_tex_bits=0;
2470  s->p_tex_bits=0;
2471  s->i_count=0;
2472  s->f_count=0;
2473  s->b_count=0;
2474  s->skip_count=0;
2475 
2476  for(i=0; i<3; i++){
2477  /* init last dc values */
2478  /* note: quant matrix value (8) is implied here */
2479  s->last_dc[i] = 128 << s->intra_dc_precision;
2480 
2481  s->current_picture.f.error[i] = 0;
2482  }
2483  if(s->codec_id==AV_CODEC_ID_AMV){
2484  s->last_dc[0] = 128*8/13;
2485  s->last_dc[1] = 128*8/14;
2486  s->last_dc[2] = 128*8/14;
2487  }
2488  s->mb_skip_run = 0;
2489  memset(s->last_mv, 0, sizeof(s->last_mv));
2490 
2491  s->last_mv_dir = 0;
2492 
2493  switch(s->codec_id){
2494  case AV_CODEC_ID_H263:
2495  case AV_CODEC_ID_H263P:
2496  case AV_CODEC_ID_FLV1:
2497  if (CONFIG_H263_ENCODER)
2498  s->gob_index = ff_h263_get_gob_height(s);
2499  break;
2500  case AV_CODEC_ID_MPEG4:
2501  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2502  ff_mpeg4_init_partitions(s);
2503  break;
2504  }
2505 
2506  s->resync_mb_x=0;
2507  s->resync_mb_y=0;
2508  s->first_slice_line = 1;
2509  s->ptr_lastgob = s->pb.buf;
2510  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2511  s->mb_x=0;
2512  s->mb_y= mb_y;
2513 
2514  ff_set_qscale(s, s->qscale);
2515  ff_init_block_index(s);
2516 
2517  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2518  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2519  int mb_type= s->mb_type[xy];
2520 // int d;
2521  int dmin= INT_MAX;
2522  int dir;
2523 
2524  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2525  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2526  return -1;
2527  }
2528  if(s->data_partitioning){
2529  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2530  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2531  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2532  return -1;
2533  }
2534  }
2535 
2536  s->mb_x = mb_x;
2537  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2538  ff_update_block_index(s);
2539 
2540  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2541  ff_h261_reorder_mb_index(s);
2542  xy= s->mb_y*s->mb_stride + s->mb_x;
2543  mb_type= s->mb_type[xy];
2544  }
2545 
2546  /* write gob / video packet header */
2547  if(s->rtp_mode){
2548  int current_packet_size, is_gob_start;
2549 
2550  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2551 
2552  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2553 
2554  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2555 
2556  switch(s->codec_id){
2557  case AV_CODEC_ID_H263:
2558  case AV_CODEC_ID_H263P:
2559  if(!s->h263_slice_structured)
2560  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2561  break;
2562  case AV_CODEC_ID_MPEG2VIDEO:
2563  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2564  case AV_CODEC_ID_MPEG1VIDEO:
2565  if(s->mb_skip_run) is_gob_start=0;
2566  break;
2567  case AV_CODEC_ID_MJPEG:
2568  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2569  break;
2570  }
2571 
2572  if(is_gob_start){
2573  if(s->start_mb_y != mb_y || mb_x!=0){
2574  write_slice_end(s);
2575  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2576  ff_mpeg4_init_partitions(s);
2577  }
2578  }
2579 
2580  av_assert2((put_bits_count(&s->pb)&7) == 0);
2581  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2582 
2583  if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2584  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2585  int d= 100 / s->avctx->error_rate;
2586  if(r % d == 0){
2587  current_packet_size=0;
2588  s->pb.buf_ptr= s->ptr_lastgob;
2589  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2590  }
2591  }
2592 
2593  if (s->avctx->rtp_callback){
2594  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2595  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2596  }
2597  update_mb_info(s, 1);
2598 
2599  switch(s->codec_id){
2600  case AV_CODEC_ID_MPEG4:
2601  if (CONFIG_MPEG4_ENCODER) {
2602  ff_mpeg4_encode_video_packet_header(s);
2603  ff_mpeg4_clean_buffers(s);
2604  }
2605  break;
2606  case AV_CODEC_ID_MPEG1VIDEO:
2607  case AV_CODEC_ID_MPEG2VIDEO:
2608  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2609  ff_mpeg1_encode_slice_header(s);
2610  ff_mpeg1_clean_buffers(s);
2611  }
2612  break;
2613  case AV_CODEC_ID_H263:
2614  case AV_CODEC_ID_H263P:
2615  if (CONFIG_H263_ENCODER)
2616  ff_h263_encode_gob_header(s, mb_y);
2617  break;
2618  }
2619 
2620  if(s->flags&CODEC_FLAG_PASS1){
2621  int bits= put_bits_count(&s->pb);
2622  s->misc_bits+= bits - s->last_bits;
2623  s->last_bits= bits;
2624  }
2625 
2626  s->ptr_lastgob += current_packet_size;
2627  s->first_slice_line=1;
2628  s->resync_mb_x=mb_x;
2629  s->resync_mb_y=mb_y;
2630  }
2631  }
2632 
2633  if( (s->resync_mb_x == s->mb_x)
2634  && s->resync_mb_y+1 == s->mb_y){
2635  s->first_slice_line=0;
2636  }
2637 
2638  s->mb_skipped=0;
2639  s->dquant=0; //only for QP_RD
2640 
2641  update_mb_info(s, 0);
2642 
2643  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2644  int next_block=0;
2645  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2646 
2647  copy_context_before_encode(&backup_s, s, -1);
2648  backup_s.pb= s->pb;
2649  backup_s.interlaced_dct= s->interlaced_dct;
2650  backup_s.esc3_level_length= s->esc3_level_length;
2651  if(s->data_partitioning){
2652  backup_s.pb2= s->pb2;
2653  backup_s.tex_pb= s->tex_pb;
2654  }
2655 
2656  if(mb_type&CANDIDATE_MB_TYPE_INTER){
2657  s->mv_dir = MV_DIR_FORWARD;
2658  s->mv_type = MV_TYPE_16X16;
2659  s->mb_intra= 0;
2660  s->mv[0][0][0] = s->p_mv_table[xy][0];
2661  s->mv[0][0][1] = s->p_mv_table[xy][1];
2662  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2663  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2664  }
2665  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2666  s->mv_dir = MV_DIR_FORWARD;
2667  s->mv_type = MV_TYPE_FIELD;
2668  s->mb_intra= 0;
2669  for(i=0; i<2; i++){
2670  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2671  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2672  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2673  }
2674  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2675  &dmin, &next_block, 0, 0);
2676  }
2677  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2678  s->mv_dir = MV_DIR_FORWARD;
2679  s->mv_type = MV_TYPE_16X16;
2680  s->mb_intra= 0;
2681  s->mv[0][0][0] = 0;
2682  s->mv[0][0][1] = 0;
2683  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2684  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2685  }
2686  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2687  s->mv_dir = MV_DIR_FORWARD;
2688  s->mv_type = MV_TYPE_8X8;
2689  s->mb_intra= 0;
2690  for(i=0; i<4; i++){
2691  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2692  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2693  }
2694  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2695  &dmin, &next_block, 0, 0);
2696  }
2697  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2698  s->mv_dir = MV_DIR_FORWARD;
2699  s->mv_type = MV_TYPE_16X16;
2700  s->mb_intra= 0;
2701  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2702  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2703  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2704  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2705  }
2706  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2707  s->mv_dir = MV_DIR_BACKWARD;
2708  s->mv_type = MV_TYPE_16X16;
2709  s->mb_intra= 0;
2710  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2711  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2712  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2713  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2714  }
2715  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2716  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2717  s->mv_type = MV_TYPE_16X16;
2718  s->mb_intra= 0;
2719  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2720  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2721  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2722  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2723  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2724  &dmin, &next_block, 0, 0);
2725  }
2726  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2727  s->mv_dir = MV_DIR_FORWARD;
2728  s->mv_type = MV_TYPE_FIELD;
2729  s->mb_intra= 0;
2730  for(i=0; i<2; i++){
2731  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2732  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2733  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2734  }
2735  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2736  &dmin, &next_block, 0, 0);
2737  }
2738  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2739  s->mv_dir = MV_DIR_BACKWARD;
2740  s->mv_type = MV_TYPE_FIELD;
2741  s->mb_intra= 0;
2742  for(i=0; i<2; i++){
2743  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2744  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2745  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2746  }
2747  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2748  &dmin, &next_block, 0, 0);
2749  }
2750  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2751  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2752  s->mv_type = MV_TYPE_FIELD;
2753  s->mb_intra= 0;
2754  for(dir=0; dir<2; dir++){
2755  for(i=0; i<2; i++){
2756  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2757  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2758  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2759  }
2760  }
2761  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2762  &dmin, &next_block, 0, 0);
2763  }
2764  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2765  s->mv_dir = 0;
2766  s->mv_type = MV_TYPE_16X16;
2767  s->mb_intra= 1;
2768  s->mv[0][0][0] = 0;
2769  s->mv[0][0][1] = 0;
2770  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2771  &dmin, &next_block, 0, 0);
2772  if(s->h263_pred || s->h263_aic){
2773  if(best_s.mb_intra)
2774  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2775  else
2776  ff_clean_intra_table_entries(s); //old mode?
2777  }
2778  }
2779 
2780  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2781  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2782  const int last_qp= backup_s.qscale;
2783  int qpi, qp, dc[6];
2784  int16_t ac[6][16];
2785  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2786  static const int dquant_tab[4]={-1,1,-2,2};
2787  int storecoefs = s->mb_intra && s->dc_val[0];
2788 
2789  av_assert2(backup_s.dquant == 0);
2790 
2791  //FIXME intra
2792  s->mv_dir= best_s.mv_dir;
2793  s->mv_type = MV_TYPE_16X16;
2794  s->mb_intra= best_s.mb_intra;
2795  s->mv[0][0][0] = best_s.mv[0][0][0];
2796  s->mv[0][0][1] = best_s.mv[0][0][1];
2797  s->mv[1][0][0] = best_s.mv[1][0][0];
2798  s->mv[1][0][1] = best_s.mv[1][0][1];
2799 
2800  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2801  for(; qpi<4; qpi++){
2802  int dquant= dquant_tab[qpi];
2803  qp= last_qp + dquant;
2804  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2805  continue;
2806  backup_s.dquant= dquant;
2807  if(storecoefs){
2808  for(i=0; i<6; i++){
2809  dc[i]= s->dc_val[0][ s->block_index[i] ];
2810  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2811  }
2812  }
2813 
2814  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2815  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2816  if(best_s.qscale != qp){
2817  if(storecoefs){
2818  for(i=0; i<6; i++){
2819  s->dc_val[0][ s->block_index[i] ]= dc[i];
2820  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2821  }
2822  }
2823  }
2824  }
2825  }
2826  }
2827  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2828  int mx= s->b_direct_mv_table[xy][0];
2829  int my= s->b_direct_mv_table[xy][1];
2830 
2831  backup_s.dquant = 0;
2832  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2833  s->mb_intra= 0;
2834  ff_mpeg4_set_direct_mv(s, mx, my);
2835  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2836  &dmin, &next_block, mx, my);
2837  }
2838  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2839  backup_s.dquant = 0;
2840  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2841  s->mb_intra= 0;
2842  ff_mpeg4_set_direct_mv(s, 0, 0);
2843  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2844  &dmin, &next_block, 0, 0);
2845  }
2846  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2847  int coded=0;
2848  for(i=0; i<6; i++)
2849  coded |= s->block_last_index[i];
2850  if(coded){
2851  int mx,my;
2852  memcpy(s->mv, best_s.mv, sizeof(s->mv));
2853  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2854  mx=my=0; //FIXME find the one we actually used
2855  ff_mpeg4_set_direct_mv(s, mx, my);
2856  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2857  mx= s->mv[1][0][0];
2858  my= s->mv[1][0][1];
2859  }else{
2860  mx= s->mv[0][0][0];
2861  my= s->mv[0][0][1];
2862  }
2863 
2864  s->mv_dir= best_s.mv_dir;
2865  s->mv_type = best_s.mv_type;
2866  s->mb_intra= 0;
2867 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2868  s->mv[0][0][1] = best_s.mv[0][0][1];
2869  s->mv[1][0][0] = best_s.mv[1][0][0];
2870  s->mv[1][0][1] = best_s.mv[1][0][1];*/
2871  backup_s.dquant= 0;
2872  s->skipdct=1;
2873  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2874  &dmin, &next_block, mx, my);
2875  s->skipdct=0;
2876  }
2877  }
2878 
2879  s->current_picture.qscale_table[xy] = best_s.qscale;
2880 
2881  copy_context_after_encode(s, &best_s, -1);
2882 
2883  pb_bits_count= put_bits_count(&s->pb);
2884  flush_put_bits(&s->pb);
2885  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2886  s->pb= backup_s.pb;
2887 
2888  if(s->data_partitioning){
2889  pb2_bits_count= put_bits_count(&s->pb2);
2890  flush_put_bits(&s->pb2);
2891  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2892  s->pb2= backup_s.pb2;
2893 
2894  tex_pb_bits_count= put_bits_count(&s->tex_pb);
2895  flush_put_bits(&s->tex_pb);
2896  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2897  s->tex_pb= backup_s.tex_pb;
2898  }
2899  s->last_bits= put_bits_count(&s->pb);
2900 
2901  if (CONFIG_H263_ENCODER &&
2902  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2903  ff_h263_update_motion_val(s);
2904 
2905  if(next_block==0){ //FIXME 16 vs linesize16
2906  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2907  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2908  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2909  }
2910 
2911  if (s->avctx->mb_decision == FF_MB_DECISION_BITS)
2912  ff_MPV_decode_mb(s, s->block);
2913  } else {
2914  int motion_x = 0, motion_y = 0;
2915  s->mv_type=MV_TYPE_16X16;
2916  // only one MB-Type possible
2917 
2918  switch(mb_type){
2919  case CANDIDATE_MB_TYPE_INTRA:
2920  s->mv_dir = 0;
2921  s->mb_intra= 1;
2922  motion_x= s->mv[0][0][0] = 0;
2923  motion_y= s->mv[0][0][1] = 0;
2924  break;
2925  case CANDIDATE_MB_TYPE_INTER:
2926  s->mv_dir = MV_DIR_FORWARD;
2927  s->mb_intra= 0;
2928  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2929  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2930  break;
2931  case CANDIDATE_MB_TYPE_INTER_I:
2932  s->mv_dir = MV_DIR_FORWARD;
2933  s->mv_type = MV_TYPE_FIELD;
2934  s->mb_intra= 0;
2935  for(i=0; i<2; i++){
2936  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2937  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2938  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2939  }
2940  break;
2941  case CANDIDATE_MB_TYPE_INTER4V:
2942  s->mv_dir = MV_DIR_FORWARD;
2943  s->mv_type = MV_TYPE_8X8;
2944  s->mb_intra= 0;
2945  for(i=0; i<4; i++){
2946  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2947  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2948  }
2949  break;
2950  case CANDIDATE_MB_TYPE_DIRECT:
2951  if (CONFIG_MPEG4_ENCODER) {
2952  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2953  s->mb_intra= 0;
2954  motion_x=s->b_direct_mv_table[xy][0];
2955  motion_y=s->b_direct_mv_table[xy][1];
2956  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2957  }
2958  break;
2959  case CANDIDATE_MB_TYPE_DIRECT0:
2960  if (CONFIG_MPEG4_ENCODER) {
2961  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2962  s->mb_intra= 0;
2963  ff_mpeg4_set_direct_mv(s, 0, 0);
2964  }
2965  break;
2966  case CANDIDATE_MB_TYPE_BIDIR:
2967  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2968  s->mb_intra= 0;
2969  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2970  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2971  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2972  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2973  break;
2974  case CANDIDATE_MB_TYPE_BACKWARD:
2975  s->mv_dir = MV_DIR_BACKWARD;
2976  s->mb_intra= 0;
2977  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2978  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2979  break;
2980  case CANDIDATE_MB_TYPE_FORWARD:
2981  s->mv_dir = MV_DIR_FORWARD;
2982  s->mb_intra= 0;
2983  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2984  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2985  break;
2986  case CANDIDATE_MB_TYPE_FORWARD_I:
2987  s->mv_dir = MV_DIR_FORWARD;
2988  s->mv_type = MV_TYPE_FIELD;
2989  s->mb_intra= 0;
2990  for(i=0; i<2; i++){
2991  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2992  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2993  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2994  }
2995  break;
2996  case CANDIDATE_MB_TYPE_BACKWARD_I:
2997  s->mv_dir = MV_DIR_BACKWARD;
2998  s->mv_type = MV_TYPE_FIELD;
2999  s->mb_intra= 0;
3000  for(i=0; i<2; i++){
3001  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3002  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3003  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3004  }
3005  break;
3006  case CANDIDATE_MB_TYPE_BIDIR_I:
3007  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3008  s->mv_type = MV_TYPE_FIELD;
3009  s->mb_intra= 0;
3010  for(dir=0; dir<2; dir++){
3011  for(i=0; i<2; i++){
3012  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3013  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3014  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3015  }
3016  }
3017  break;
3018  default:
3019  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3020  }
3021 
3022  encode_mb(s, motion_x, motion_y);
3023 
3024  // RAL: Update last macroblock type
3025  s->last_mv_dir = s->mv_dir;
3026 
3027  if (CONFIG_H263_ENCODER &&
3028  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3029  ff_h263_update_motion_val(s);
3030 
3031  ff_MPV_decode_mb(s, s->block);
3032  }
3033 
3034  /* clean the MV table in I/P/S frames; direct mode in later B-frames relies on it */
3035  if(s->mb_intra /* && I,P,S_TYPE */){
3036  s->p_mv_table[xy][0]=0;
3037  s->p_mv_table[xy][1]=0;
3038  }
3039 
3040  if(s->flags&CODEC_FLAG_PSNR){
3041  int w= 16;
3042  int h= 16;
3043 
3044  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3045  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3046 
3047  s->current_picture.f.error[0] += sse(
3048  s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3049  s->dest[0], w, h, s->linesize);
3050  s->current_picture.f.error[1] += sse(
3051  s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3052  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3053  s->current_picture.f.error[2] += sse(
3054  s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3055  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3056  }
3057  if(s->loop_filter){
3058  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3059  ff_h263_loop_filter(s);
3060  }
3061  av_dlog(s->avctx, "MB %d %d bits\n",
3062  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3063  }
3064  }
3065 
3066  // not beautiful, but it must be written before flushing, so it has to be here
3067  if(CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3068  ff_msmpeg4_encode_ext_header(s);
3069 
3070  write_slice_end(s);
3071 
3072  /* Send the last GOB if RTP */
3073  if (s->avctx->rtp_callback) {
3074  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3075  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3076  /* Call the RTP callback to send the last GOB */
3077  emms_c();
3078  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3079  }
3080 
3081  return 0;
3082 }
3083 
3084 #define MERGE(field) dst->field += src->field; src->field=0
3085 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3086  MERGE(me.scene_change_score);
3087  MERGE(me.mc_mb_var_sum_temp);
3088  MERGE(me.mb_var_sum_temp);
3089 }
3090 
3091 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3092  int i;
3093 
3094  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3095  MERGE(dct_count[1]);
3096  MERGE(mv_bits);
3097  MERGE(i_tex_bits);
3098  MERGE(p_tex_bits);
3099  MERGE(i_count);
3100  MERGE(f_count);
3101  MERGE(b_count);
3102  MERGE(skip_count);
3103  MERGE(misc_bits);
3104  MERGE(er.error_count);
3105  MERGE(padding_bug_score);
3106  MERGE(current_picture.f.error[0]);
3107  MERGE(current_picture.f.error[1]);
3108  MERGE(current_picture.f.error[2]);
3109 
3110  if(dst->avctx->noise_reduction){
3111  for(i=0; i<64; i++){
3112  MERGE(dct_error_sum[0][i]);
3113  MERGE(dct_error_sum[1][i]);
3114  }
3115  }
3116 
3117  assert(put_bits_count(&src->pb) % 8 ==0);
3118  assert(put_bits_count(&dst->pb) % 8 ==0);
3119  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3120  flush_put_bits(&dst->pb);
3121 }
3122 
3123 static int estimate_qp(MpegEncContext *s, int dry_run){
3124  if (s->next_lambda){
3125  s->current_picture_ptr->f.quality =
3126  s->current_picture.f.quality = s->next_lambda;
3127  if(!dry_run) s->next_lambda= 0;
3128  } else if (!s->fixed_qscale) {
3129  s->current_picture_ptr->f.quality =
3130  s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3131  if (s->current_picture.f.quality < 0)
3132  return -1;
3133  }
3134 
3135  if(s->adaptive_quant){
3136  switch(s->codec_id){
3137  case AV_CODEC_ID_MPEG4:
3138  if (CONFIG_MPEG4_ENCODER)
3139  ff_clean_mpeg4_qscales(s);
3140  break;
3141  case AV_CODEC_ID_H263:
3142  case AV_CODEC_ID_H263P:
3143  case AV_CODEC_ID_FLV1:
3144  if (CONFIG_H263_ENCODER)
3145  ff_clean_h263_qscales(s);
3146  break;
3147  default:
3148  ff_init_qscale_tab(s);
3149  }
3150 
3151  s->lambda= s->lambda_table[0];
3152  //FIXME broken
3153  }else
3154  s->lambda = s->current_picture.f.quality;
3155  update_qscale(s);
3156  return 0;
3157 }
3158 
3159 /* must be called before writing the header */
3160 static void set_frame_distances(MpegEncContext * s){
3161  assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3162  s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3163 
3164  if(s->pict_type==AV_PICTURE_TYPE_B){
3165  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3166  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3167  }else{
3168  s->pp_time= s->time - s->last_non_b_time;
3169  s->last_non_b_time= s->time;
3170  assert(s->picture_number==0 || s->pp_time > 0);
3171  }
3172 }
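A worked example of the distances above, assuming display order I P B P with pts 0, 1, 2, 3 (coding order I, P1, P3, B2) and time_base.num == 1:

/* When P3 is coded (non-B):  pp_time = time - last_non_b_time = 3 - 1 = 2
 * When B2 is coded:          pb_time = pp_time - (last_non_b_time - time)
 *                                    = 2 - (3 - 2) = 1
 * i.e. pb_time is the distance from the past reference to the B frame, and
 * 0 < pb_time < pp_time holds, matching the assertion above. */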
3173 
3174 static int encode_picture(MpegEncContext *s, int picture_number)
3175 {
3176  int i, ret;
3177  int bits;
3178  int context_count = s->slice_context_count;
3179 
3181 
3182  /* Reset the average MB variance */
3183  s->me.mb_var_sum_temp =
3184  s->me.mc_mb_var_sum_temp = 0;
3185 
3186  /* we need to initialize some time vars before we can encode b-frames */
3187  // RAL: Condition added for MPEG1VIDEO
3188  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3189  set_frame_distances(s);
3190  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3191  ff_set_mpeg4_time(s);
3192 
3193  s->me.scene_change_score=0;
3194 
3195 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3196 
3197  if(s->pict_type==AV_PICTURE_TYPE_I){
3198  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3199  else s->no_rounding=0;
3200  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3201  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3202  s->no_rounding ^= 1;
3203  }
3204 
3205  if(s->flags & CODEC_FLAG_PASS2){
3206  if (estimate_qp(s,1) < 0)
3207  return -1;
3208  ff_get_2pass_fcode(s);
3209  }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3210  if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay)
3211  s->lambda= s->last_lambda_for[s->pict_type];
3212  else
3213  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3214  update_qscale(s);
3215  }
3216 
3217  if(s->codec_id != AV_CODEC_ID_AMV){
3222  }
3223 
3224  s->mb_intra=0; //for the rate distortion & bit compare functions
3225  for(i=1; i<context_count; i++){
3226  ret = ff_update_duplicate_context(s->thread_context[i], s);
3227  if (ret < 0)
3228  return ret;
3229  }
3230 
3231  if(ff_init_me(s)<0)
3232  return -1;
3233 
3234  /* Estimate motion for every MB */
3235  if(s->pict_type != AV_PICTURE_TYPE_I){
3236  s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3237  s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3238  if (s->pict_type != AV_PICTURE_TYPE_B) {
3239  if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3240  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3241  }
3242  }
3243 
3244  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3245  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3246  /* I-Frame */
3247  for(i=0; i<s->mb_stride*s->mb_height; i++)
3248  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3249 
3250  if(!s->fixed_qscale){
3251  /* finding spatial complexity for I-frame rate control */
3252  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3253  }
3254  }
3255  for(i=1; i<context_count; i++){
3256  merge_context_after_me(s, s->thread_context[i]);
3257  }
3258  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3259  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3260  emms_c();
3261 
3262  if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3263  s->pict_type= AV_PICTURE_TYPE_I;
3264  for(i=0; i<s->mb_stride*s->mb_height; i++)
3265  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3266  if(s->msmpeg4_version >= 3)
3267  s->no_rounding=1;
3268  av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3269  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3270  }
3271 
3272  if(!s->umvplus){
3273  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3274  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3275 
3276  if(s->flags & CODEC_FLAG_INTERLACED_ME){
3277  int a,b;
3278  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3279  b= ff_get_best_fcode(s, s->p_field_mv_table[1][0], CANDIDATE_MB_TYPE_INTER_I);
3280  s->f_code= FFMAX3(s->f_code, a, b);
3281  }
3282 
3283  ff_fix_long_p_mvs(s);
3284  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3285  if(s->flags & CODEC_FLAG_INTERLACED_ME){
3286  int j;
3287  for(i=0; i<2; i++){
3288  for(j=0; j<2; j++)
3289  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3290  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3291  }
3292  }
3293  }
3294 
3295  if(s->pict_type==AV_PICTURE_TYPE_B){
3296  int a, b;
3297 
3298  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3299  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3300  s->f_code = FFMAX(a, b);
3301 
3302  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3303  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3304  s->b_code = FFMAX(a, b);
3305 
3306  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3307  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3308  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3309  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3310  if(s->flags & CODEC_FLAG_INTERLACED_ME){
3311  int dir, j;
3312  for(dir=0; dir<2; dir++){
3313  for(i=0; i<2; i++){
3314  for(j=0; j<2; j++){
3315  int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3316  : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3317  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3318  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3319  }
3320  }
3321  }
3322  }
3323  }
3324  }
3325 
3326  if (estimate_qp(s, 0) < 0)
3327  return -1;
3328 
3329  if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3330  s->qscale= 3; //reduce clipping problems
3331 
3332  if (s->out_format == FMT_MJPEG) {
3333  /* for mjpeg, we do include qscale in the matrix */
3334  for(i=1;i<64;i++){
3335  int j= s->dsp.idct_permutation[i];
3336 
3337  s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3338  }
3339  s->y_dc_scale_table=
3340  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3341  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3342  ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3343  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3344  s->qscale= 8;
3345  }
3346  if(s->codec_id == AV_CODEC_ID_AMV){
3347  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3348  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3349  for(i=1;i<64;i++){
3350  int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
3351 
3352  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3353  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3354  }
3355  s->y_dc_scale_table= y;
3356  s->c_dc_scale_table= c;
3357  s->intra_matrix[0] = 13;
3358  s->chroma_intra_matrix[0] = 14;
3359  ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3360  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3361  ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3362  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3363  s->qscale= 8;
3364  }
3365 
3366  //FIXME var duplication
3367  s->current_picture_ptr->f.key_frame =
3368  s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3369  s->current_picture_ptr->f.pict_type =
3370  s->current_picture.f.pict_type = s->pict_type;
3371 
3372  if (s->current_picture.f.key_frame)
3373  s->picture_in_gop_number=0;
3374 
3375  s->mb_x = s->mb_y = 0;
3376  s->last_bits= put_bits_count(&s->pb);
3377  switch(s->out_format) {
3378  case FMT_MJPEG:
3379  if (CONFIG_MJPEG_ENCODER)
3380  ff_mjpeg_encode_picture_header(s);
3381  break;
3382  case FMT_H261:
3383  if (CONFIG_H261_ENCODER)
3384  ff_h261_encode_picture_header(s, picture_number);
3385  break;
3386  case FMT_H263:
3387  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3388  ff_wmv2_encode_picture_header(s, picture_number);
3389  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3390  ff_msmpeg4_encode_picture_header(s, picture_number);
3391  else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3392  ff_mpeg4_encode_picture_header(s, picture_number);
3393  else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3394  ff_rv10_encode_picture_header(s, picture_number);
3395  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3396  ff_rv20_encode_picture_header(s, picture_number);
3397  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3398  ff_flv_encode_picture_header(s, picture_number);
3399  else if (CONFIG_H263_ENCODER)
3400  ff_h263_encode_picture_header(s, picture_number);
3401  break;
3402  case FMT_MPEG1:
3403  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3404  ff_mpeg1_encode_picture_header(s, picture_number);
3405  break;
3406  default:
3407  av_assert0(0);
3408  }
3409  bits= put_bits_count(&s->pb);
3410  s->header_bits= bits - s->last_bits;
3411 
3412  for(i=1; i<context_count; i++){
3413  update_duplicate_context_after_me(s->thread_context[i], s);
3414  }
3415  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3416  for(i=1; i<context_count; i++){
3417  merge_context_after_encode(s, s->thread_context[i]);
3418  }
3419  emms_c();
3420  return 0;
3421 }
3422 
3423 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3424  const int intra= s->mb_intra;
3425  int i;
3426 
3427  s->dct_count[intra]++;
3428 
3429  for(i=0; i<64; i++){
3430  int level= block[i];
3431 
3432  if(level){
3433  if(level>0){
3434  s->dct_error_sum[intra][i] += level;
3435  level -= s->dct_offset[intra][i];
3436  if(level<0) level=0;
3437  }else{
3438  s->dct_error_sum[intra][i] -= level;
3439  level += s->dct_offset[intra][i];
3440  if(level>0) level=0;
3441  }
3442  block[i]= level;
3443  }
3444  }
3445 }
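A worked example of the effect, assuming dct_offset[intra][i] == 3 for some coefficient:

/* level = +5  ->  dct_error_sum[intra][i] += 5, output level = +2
 * level = -2  ->  dct_error_sum[intra][i] += 2, output level =  0 (clamped)
 * level =  0  ->  left untouched and not counted
 * The offsets themselves are recomputed per frame from dct_error_sum and
 * dct_count elsewhere in this file, so coefficients that are consistently
 * large end up with a smaller offset than ones that hover around zero. */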
3446 
3447 static int dct_quantize_trellis_c(MpegEncContext *s,
3448  int16_t *block, int n,
3449  int qscale, int *overflow){
3450  const int *qmat;
3451  const uint8_t *scantable= s->intra_scantable.scantable;
3452  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3453  int max=0;
3454  unsigned int threshold1, threshold2;
3455  int bias=0;
3456  int run_tab[65];
3457  int level_tab[65];
3458  int score_tab[65];
3459  int survivor[65];
3460  int survivor_count;
3461  int last_run=0;
3462  int last_level=0;
3463  int last_score= 0;
3464  int last_i;
3465  int coeff[2][64];
3466  int coeff_count[64];
3467  int qmul, qadd, start_i, last_non_zero, i, dc;
3468  const int esc_length= s->ac_esc_length;
3469  uint8_t * length;
3470  uint8_t * last_length;
3471  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3472 
3473  s->dsp.fdct (block);
3474 
3475  if(s->dct_error_sum)
3476  s->denoise_dct(s, block);
3477  qmul= qscale*16;
3478  qadd= ((qscale-1)|1)*8;
3479 
3480  if (s->mb_intra) {
3481  int q;
3482  if (!s->h263_aic) {
3483  if (n < 4)
3484  q = s->y_dc_scale;
3485  else
3486  q = s->c_dc_scale;
3487  q = q << 3;
3488  } else{
3489  /* For AIC we skip quant/dequant of INTRADC */
3490  q = 1 << 3;
3491  qadd=0;
3492  }
3493 
3494  /* note: block[0] is assumed to be positive */
3495  block[0] = (block[0] + (q >> 1)) / q;
3496  start_i = 1;
3497  last_non_zero = 0;
3498  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3499  if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3500  bias= 1<<(QMAT_SHIFT-1);
3501  length = s->intra_ac_vlc_length;
3502  last_length= s->intra_ac_vlc_last_length;
3503  } else {
3504  start_i = 0;
3505  last_non_zero = -1;
3506  qmat = s->q_inter_matrix[qscale];
3507  length = s->inter_ac_vlc_length;
3508  last_length= s->inter_ac_vlc_last_length;
3509  }
3510  last_i= start_i;
3511 
3512  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3513  threshold2= (threshold1<<1);
3514 
3515  for(i=63; i>=start_i; i--) {
3516  const int j = scantable[i];
3517  int level = block[j] * qmat[j];
3518 
3519  if(((unsigned)(level+threshold1))>threshold2){
3520  last_non_zero = i;
3521  break;
3522  }
3523  }
3524 
3525  for(i=start_i; i<=last_non_zero; i++) {
3526  const int j = scantable[i];
3527  int level = block[j] * qmat[j];
3528 
3529 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3530 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3531  if(((unsigned)(level+threshold1))>threshold2){
3532  if(level>0){
3533  level= (bias + level)>>QMAT_SHIFT;
3534  coeff[0][i]= level;
3535  coeff[1][i]= level-1;
3536 // coeff[2][k]= level-2;
3537  }else{
3538  level= (bias - level)>>QMAT_SHIFT;
3539  coeff[0][i]= -level;
3540  coeff[1][i]= -level+1;
3541 // coeff[2][k]= -level+2;
3542  }
3543  coeff_count[i]= FFMIN(level, 2);
3544  av_assert2(coeff_count[i]);
3545  max |=level;
3546  }else{
3547  coeff[0][i]= (level>>31)|1;
3548  coeff_count[i]= 1;
3549  }
3550  }
3551 
3552  *overflow= s->max_qcoeff < max; //overflow might have happened
3553 
3554  if(last_non_zero < start_i){
3555  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3556  return last_non_zero;
3557  }
3558 
3559  score_tab[start_i]= 0;
3560  survivor[0]= start_i;
3561  survivor_count= 1;
3562 
3563  for(i=start_i; i<=last_non_zero; i++){
3564  int level_index, j, zero_distortion;
3565  int dct_coeff= FFABS(block[ scantable[i] ]);
3566  int best_score=256*256*256*120;
3567 
3568  if (s->dsp.fdct == ff_fdct_ifast)
3569  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3570  zero_distortion= dct_coeff*dct_coeff;
3571 
3572  for(level_index=0; level_index < coeff_count[i]; level_index++){
3573  int distortion;
3574  int level= coeff[level_index][i];
3575  const int alevel= FFABS(level);
3576  int unquant_coeff;
3577 
3578  av_assert2(level);
3579 
3580  if(s->out_format == FMT_H263){
3581  unquant_coeff= alevel*qmul + qadd;
3582  }else{ //MPEG1
3583  j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3584  if(s->mb_intra){
3585  unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3586  unquant_coeff = (unquant_coeff - 1) | 1;
3587  }else{
3588  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3589  unquant_coeff = (unquant_coeff - 1) | 1;
3590  }
3591  unquant_coeff<<= 3;
3592  }
3593 
3594  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3595  level+=64;
3596  if((level&(~127)) == 0){
3597  for(j=survivor_count-1; j>=0; j--){
3598  int run= i - survivor[j];
3599  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3600  score += score_tab[i-run];
3601 
3602  if(score < best_score){
3603  best_score= score;
3604  run_tab[i+1]= run;
3605  level_tab[i+1]= level-64;
3606  }
3607  }
3608 
3609  if(s->out_format == FMT_H263){
3610  for(j=survivor_count-1; j>=0; j--){
3611  int run= i - survivor[j];
3612  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3613  score += score_tab[i-run];
3614  if(score < last_score){
3615  last_score= score;
3616  last_run= run;
3617  last_level= level-64;
3618  last_i= i+1;
3619  }
3620  }
3621  }
3622  }else{
3623  distortion += esc_length*lambda;
3624  for(j=survivor_count-1; j>=0; j--){
3625  int run= i - survivor[j];
3626  int score= distortion + score_tab[i-run];
3627 
3628  if(score < best_score){
3629  best_score= score;
3630  run_tab[i+1]= run;
3631  level_tab[i+1]= level-64;
3632  }
3633  }
3634 
3635  if(s->out_format == FMT_H263){
3636  for(j=survivor_count-1; j>=0; j--){
3637  int run= i - survivor[j];
3638  int score= distortion + score_tab[i-run];
3639  if(score < last_score){
3640  last_score= score;
3641  last_run= run;
3642  last_level= level-64;
3643  last_i= i+1;
3644  }
3645  }
3646  }
3647  }
3648  }
3649 
3650  score_tab[i+1]= best_score;
3651 
3652  //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
3653  if(last_non_zero <= 27){
3654  for(; survivor_count; survivor_count--){
3655  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3656  break;
3657  }
3658  }else{
3659  for(; survivor_count; survivor_count--){
3660  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3661  break;
3662  }
3663  }
3664 
3665  survivor[ survivor_count++ ]= i+1;
3666  }
3667 
3668  if(s->out_format != FMT_H263){
3669  last_score= 256*256*256*120;
3670  for(i= survivor[0]; i<=last_non_zero + 1; i++){
3671  int score= score_tab[i];
3672  if(i) score += lambda*2; //FIXME exacter?
3673 
3674  if(score < last_score){
3675  last_score= score;
3676  last_i= i;
3677  last_level= level_tab[i];
3678  last_run= run_tab[i];
3679  }
3680  }
3681  }
3682 
3683  s->coded_score[n] = last_score;
3684 
3685  dc= FFABS(block[0]);
3686  last_non_zero= last_i - 1;
3687  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3688 
3689  if(last_non_zero < start_i)
3690  return last_non_zero;
3691 
3692  if(last_non_zero == 0 && start_i == 0){
3693  int best_level= 0;
3694  int best_score= dc * dc;
3695 
3696  for(i=0; i<coeff_count[0]; i++){
3697  int level= coeff[i][0];
3698  int alevel= FFABS(level);
3699  int unquant_coeff, score, distortion;
3700 
3701  if(s->out_format == FMT_H263){
3702  unquant_coeff= (alevel*qmul + qadd)>>3;
3703  }else{ //MPEG1
3704  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3705  unquant_coeff = (unquant_coeff - 1) | 1;
3706  }
3707  unquant_coeff = (unquant_coeff + 4) >> 3;
3708  unquant_coeff<<= 3 + 3;
3709 
3710  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3711  level+=64;
3712  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3713  else score= distortion + esc_length*lambda;
3714 
3715  if(score < best_score){
3716  best_score= score;
3717  best_level= level - 64;
3718  }
3719  }
3720  block[0]= best_level;
3721  s->coded_score[n] = best_score - dc*dc;
3722  if(best_level == 0) return -1;
3723  else return last_non_zero;
3724  }
3725 
3726  i= last_i;
3727  av_assert2(last_level);
3728 
3729  block[ perm_scantable[last_non_zero] ]= last_level;
3730  i -= last_run + 1;
3731 
3732  for(; i>start_i; i -= run_tab[i] + 1){
3733  block[ perm_scantable[i-1] ]= level_tab[i];
3734  }
3735 
3736  return last_non_zero;
3737 }
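In outline, the function above runs a survivor-pruned dynamic program over the scan positions; a sketch of the recurrence it evaluates (notation only, not code from this file):

/* score_tab[i+1] = min over surviving start positions j and the one or two
 *                  candidate magnitudes in coeff[][i] of
 *                      score_tab[j]
 *                    + (unquant_coeff - dct_coeff)^2 - zero_distortion
 *                    + lambda * bits(run = i - j, level)
 * run_tab[]/level_tab[] act as back-pointers and the coded block is rebuilt
 * at the end by walking them backwards from last_i.  Survivors whose score
 * already exceeds the current best (plus a lambda slack for long blocks) are
 * dropped, which is what keeps the search fast in practice. */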
3738 
3739 //#define REFINE_STATS 1
3740 static int16_t basis[64][64];
3741 
3742 static void build_basis(uint8_t *perm){
3743  int i, j, x, y;
3744  emms_c();
3745  for(i=0; i<8; i++){
3746  for(j=0; j<8; j++){
3747  for(y=0; y<8; y++){
3748  for(x=0; x<8; x++){
3749  double s= 0.25*(1<<BASIS_SHIFT);
3750  int index= 8*i + j;
3751  int perm_index= perm[index];
3752  if(i==0) s*= sqrt(0.5);
3753  if(j==0) s*= sqrt(0.5);
3754  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3755  }
3756  }
3757  }
3758  }
3759 }
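build_basis() fills basis[][] with the 64 two-dimensional DCT basis patterns, permuted to match the IDCT and scaled by 0.25*(1<<BASIS_SHIFT) with the usual sqrt(0.5) factor on the DC row/column; dct_quantize_refine() then adds coeff * basis[] into the spatial residual to try a +/-1 change of a single coefficient without a full IDCT. A standalone sketch of one entry, assuming BASIS_SHIFT is the dsputil value (16):

/* Illustrative only; basis_shift stands for BASIS_SHIFT from dsputil.h
 * (assumed to be 16 here). */
#include <math.h>

static long basis_value(int i, int j, int x, int y, int basis_shift)
{
    double s = 0.25 * (1 << basis_shift);
    if (i == 0) s *= sqrt(0.5);
    if (j == 0) s *= sqrt(0.5);
    return lrint(s * cos((M_PI / 8.0) * i * (x + 0.5))
                   * cos((M_PI / 8.0) * j * (y + 0.5)));
}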
3760 
3761 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3762  int16_t *block, int16_t *weight, int16_t *orig,
3763  int n, int qscale){
3764  int16_t rem[64];
3765  LOCAL_ALIGNED_16(int16_t, d1, [64]);
3766  const uint8_t *scantable= s->intra_scantable.scantable;
3767  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3768 // unsigned int threshold1, threshold2;
3769 // int bias=0;
3770  int run_tab[65];
3771  int prev_run=0;
3772  int prev_level=0;
3773  int qmul, qadd, start_i, last_non_zero, i, dc;
3774  uint8_t * length;
3775  uint8_t * last_length;
3776  int lambda;
3777  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3778 #ifdef REFINE_STATS
3779 static int count=0;
3780 static int after_last=0;
3781 static int to_zero=0;
3782 static int from_zero=0;
3783 static int raise=0;
3784 static int lower=0;
3785 static int messed_sign=0;
3786 #endif
3787 
3788  if(basis[0][0] == 0)
3789  build_basis(s->dsp.idct_permutation);
3790 
3791  qmul= qscale*2;
3792  qadd= (qscale-1)|1;
3793  if (s->mb_intra) {
3794  if (!s->h263_aic) {
3795  if (n < 4)
3796  q = s->y_dc_scale;
3797  else
3798  q = s->c_dc_scale;
3799  } else{
3800  /* For AIC we skip quant/dequant of INTRADC */
3801  q = 1;
3802  qadd=0;
3803  }
3804  q <<= RECON_SHIFT-3;
3805  /* note: block[0] is assumed to be positive */
3806  dc= block[0]*q;
3807 // block[0] = (block[0] + (q >> 1)) / q;
3808  start_i = 1;
3809 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3810 // bias= 1<<(QMAT_SHIFT-1);
3811  length = s->intra_ac_vlc_length;
3812  last_length= s->intra_ac_vlc_last_length;
3813  } else {
3814  dc= 0;
3815  start_i = 0;
3816  length = s->inter_ac_vlc_length;
3817  last_length= s->inter_ac_vlc_last_length;
3818  }
3819  last_non_zero = s->block_last_index[n];
3820 
3821 #ifdef REFINE_STATS
3822 {START_TIMER
3823 #endif
3824  dc += (1<<(RECON_SHIFT-1));
3825  for(i=0; i<64; i++){
3826  rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3827  }
3828 #ifdef REFINE_STATS
3829 STOP_TIMER("memset rem[]")}
3830 #endif
3831  sum=0;
3832  for(i=0; i<64; i++){
3833  int one= 36;
3834  int qns=4;
3835  int w;
3836 
3837  w= FFABS(weight[i]) + qns*one;
3838  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3839 
3840  weight[i] = w;
3841 // w=weight[i] = (63*qns + (w/2)) / w;
3842 
3843  av_assert2(w>0);
3844  av_assert2(w<(1<<6));
3845  sum += w*w;
3846  }
3847  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3848 #ifdef REFINE_STATS
3849 {START_TIMER
3850 #endif
3851  run=0;
3852  rle_index=0;
3853  for(i=start_i; i<=last_non_zero; i++){
3854  int j= perm_scantable[i];
3855  const int level= block[j];
3856  int coeff;
3857 
3858  if(level){
3859  if(level<0) coeff= qmul*level - qadd;
3860  else coeff= qmul*level + qadd;
3861  run_tab[rle_index++]=run;
3862  run=0;
3863 
3864  s->dsp.add_8x8basis(rem, basis[j], coeff);
3865  }else{
3866  run++;
3867  }
3868  }
3869 #ifdef REFINE_STATS
3870 if(last_non_zero>0){
3871 STOP_TIMER("init rem[]")
3872 }
3873 }
3874 
3875 {START_TIMER
3876 #endif
3877  for(;;){
3878  int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3879  int best_coeff=0;
3880  int best_change=0;
3881  int run2, best_unquant_change=0, analyze_gradient;
3882 #ifdef REFINE_STATS
3883 {START_TIMER
3884 #endif
3885  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3886 
3887  if(analyze_gradient){
3888 #ifdef REFINE_STATS
3889 {START_TIMER
3890 #endif
3891  for(i=0; i<64; i++){
3892  int w= weight[i];
3893 
3894  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3895  }
3896 #ifdef REFINE_STATS
3897 STOP_TIMER("rem*w*w")}
3898 {START_TIMER
3899 #endif
3900  s->dsp.fdct(d1);
3901 #ifdef REFINE_STATS
3902 STOP_TIMER("dct")}
3903 #endif
3904  }
3905 
3906  if(start_i){
3907  const int level= block[0];
3908  int change, old_coeff;
3909 
3910  av_assert2(s->mb_intra);
3911 
3912  old_coeff= q*level;
3913 
3914  for(change=-1; change<=1; change+=2){
3915  int new_level= level + change;
3916  int score, new_coeff;
3917 
3918  new_coeff= q*new_level;
3919  if(new_coeff >= 2048 || new_coeff < 0)
3920  continue;
3921 
3922  score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3923  if(score<best_score){
3924  best_score= score;
3925  best_coeff= 0;
3926  best_change= change;
3927  best_unquant_change= new_coeff - old_coeff;
3928  }
3929  }
3930  }
3931 
3932  run=0;
3933  rle_index=0;
3934  run2= run_tab[rle_index++];
3935  prev_level=0;
3936  prev_run=0;
3937 
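 /* Scan every coefficient position: run counts the zeros before the
  * current position and run2 the remaining zeros until the next nonzero
  * coefficient, so the rate delta of creating or removing a coefficient
  * can account for the runs that get split or merged; length[] and
  * last_length[] give the VLC bit cost, with last_length[] used for the
  * final coefficient of the block. */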
3938  for(i=start_i; i<64; i++){
3939  int j= perm_scantable[i];
3940  const int level= block[j];
3941  int change, old_coeff;
3942 
3943  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3944  break;
3945 
3946  if(level){
3947  if(level<0) old_coeff= qmul*level - qadd;
3948  else old_coeff= qmul*level + qadd;
3949  run2= run_tab[rle_index++]; //FIXME ! maybe after last
3950  }else{
3951  old_coeff=0;
3952  run2--;
3953  av_assert2(run2>=0 || i >= last_non_zero );
3954  }
3955 
3956  for(change=-1; change<=1; change+=2){
3957  int new_level= level + change;
3958  int score, new_coeff, unquant_change;
3959 
3960  score=0;
3961  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3962  continue;
3963 
3964  if(new_level){
3965  if(new_level<0) new_coeff= qmul*new_level - qadd;
3966  else new_coeff= qmul*new_level + qadd;
3967  if(new_coeff >= 2048 || new_coeff <= -2048)
3968  continue;
3969  //FIXME check for overflow
3970 
3971  if(level){
3972  if(level < 63 && level > -63){
3973  if(i < last_non_zero)
3974  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3975  - length[UNI_AC_ENC_INDEX(run, level+64)];
3976  else
3977  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3978  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3979  }
3980  }else{
3981  av_assert2(FFABS(new_level)==1);
3982 
3983  if(analyze_gradient){
3984  int g= d1[ scantable[i] ];
3985  if(g && (g^new_level) >= 0)
3986  continue;
3987  }
3988 
3989  if(i < last_non_zero){
3990  int next_i= i + run2 + 1;
3991  int next_level= block[ perm_scantable[next_i] ] + 64;
3992 
3993  if(next_level&(~127))
3994  next_level= 0;
3995 
3996  if(next_i < last_non_zero)
3997  score += length[UNI_AC_ENC_INDEX(run, 65)]
3998  + length[UNI_AC_ENC_INDEX(run2, next_level)]
3999  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4000  else
4001  score += length[UNI_AC_ENC_INDEX(run, 65)]
4002  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4003  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4004  }else{
4005  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4006  if(prev_level){
4007  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4008  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4009  }
4010  }
4011  }
4012  }else{
4013  new_coeff=0;
4014  av_assert2(FFABS(level)==1);
4015 
4016  if(i < last_non_zero){
4017  int next_i= i + run2 + 1;
4018  int next_level= block[ perm_scantable[next_i] ] + 64;
4019 
4020  if(next_level&(~127))
4021  next_level= 0;
4022 
4023  if(next_i < last_non_zero)
4024  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4025  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4026  - length[UNI_AC_ENC_INDEX(run, 65)];
4027  else
4028  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4029  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4030  - length[UNI_AC_ENC_INDEX(run, 65)];
4031  }else{
4032  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4033  if(prev_level){
4034  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4035  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4036  }
4037  }
4038  }
4039 
4040  score *= lambda;
4041 
4042  unquant_change= new_coeff - old_coeff;
4043  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4044 
4045  score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
4046  if(score<best_score){
4047  best_score= score;
4048  best_coeff= i;
4049  best_change= change;
4050  best_unquant_change= unquant_change;
4051  }
4052  }
4053  if(level){
4054  prev_level= level + 64;
4055  if(prev_level&(~127))
4056  prev_level= 0;
4057  prev_run= run;
4058  run=0;
4059  }else{
4060  run++;
4061  }
4062  }
4063 #ifdef REFINE_STATS
4064 STOP_TIMER("iterative step")}
4065 #endif
4066 
4067  if(best_change){
4068  int j= perm_scantable[ best_coeff ];
4069 
4070  block[j] += best_change;
4071 
4072  if(best_coeff > last_non_zero){
4073  last_non_zero= best_coeff;
4074  av_assert2(block[j]);
4075 #ifdef REFINE_STATS
4076 after_last++;
4077 #endif
4078  }else{
4079 #ifdef REFINE_STATS
4080 if(block[j]){
4081  if(block[j] - best_change){
4082  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4083  raise++;
4084  }else{
4085  lower++;
4086  }
4087  }else{
4088  from_zero++;
4089  }
4090 }else{
4091  to_zero++;
4092 }
4093 #endif
4094  for(; last_non_zero>=start_i; last_non_zero--){
4095  if(block[perm_scantable[last_non_zero]])
4096  break;
4097  }
4098  }
4099 #ifdef REFINE_STATS
4100 count++;
4101 if(256*256*256*64 % count == 0){
4102  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4103 }
4104 #endif
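 /* A change was applied: rebuild run_tab[] for the modified block and
  * fold the corresponding basis function into rem[] so the next pass
  * scores against the updated reconstruction error. */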
4105  run=0;
4106  rle_index=0;
4107  for(i=start_i; i<=last_non_zero; i++){
4108  int j= perm_scantable[i];
4109  const int level= block[j];
4110 
4111  if(level){
4112  run_tab[rle_index++]=run;
4113  run=0;
4114  }else{
4115  run++;
4116  }
4117  }
4118 
4119  s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4120  }else{
4121  break;
4122  }
4123  }
4124 #ifdef REFINE_STATS
4125 if(last_non_zero>0){
4126 STOP_TIMER("iterative search")
4127 }
4128 }
4129 #endif
4130 
4131  return last_non_zero;
4132 }
4133 
4134 static int dct_quantize_c(MpegEncContext *s,
4135  int16_t *block, int n,
4136  int qscale, int *overflow)
4137 {
4138  int i, j, level, last_non_zero, q, start_i;
4139  const int *qmat;
4140  const uint8_t *scantable= s->intra_scantable.scantable;
4141  int bias;
4142  int max=0;
4143  unsigned int threshold1, threshold2;
4144 
4145  s->dsp.fdct (block);
4146 
4147  if(s->dct_error_sum)
4148  s->denoise_dct(s, block);
4149 
4150  if (s->mb_intra) {
4151  if (!s->h263_aic) {
4152  if (n < 4)
4153  q = s->y_dc_scale;
4154  else
4155  q = s->c_dc_scale;
4156  q = q << 3;
4157  } else
4158  /* For AIC we skip quant/dequant of INTRADC */
4159  q = 1 << 3;
4160 
4161  /* note: block[0] is assumed to be positive */
4162  block[0] = (block[0] + (q >> 1)) / q;
4163  start_i = 1;
4164  last_non_zero = 0;
4165  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4166  bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4167  } else {
4168  start_i = 0;
4169  last_non_zero = -1;
4170  qmat = s->q_inter_matrix[qscale];
4171  bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4172  }
4173  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4174  threshold2= (threshold1<<1);
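 /* threshold1/threshold2 implement a branchless dead-zone test:
  * (unsigned)(level + threshold1) > threshold2 is equivalent to
  * level > threshold1 || level < -threshold1, i.e. the coefficient
  * survives quantization. Scanning from i = 63 downwards zeroes the
  * trailing coefficients and finds last_non_zero without quantizing
  * them individually. */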
4175  for(i=63;i>=start_i;i--) {
4176  j = scantable[i];
4177  level = block[j] * qmat[j];
4178 
4179  if(((unsigned)(level+threshold1))>threshold2){
4180  last_non_zero = i;
4181  break;
4182  }else{
4183  block[j]=0;
4184  }
4185  }
4186  for(i=start_i; i<=last_non_zero; i++) {
4187  j = scantable[i];
4188  level = block[j] * qmat[j];
4189 
4190 // if( bias+level >= (1<<QMAT_SHIFT)
4191 // || bias-level >= (1<<QMAT_SHIFT)){
4192  if(((unsigned)(level+threshold1))>threshold2){
4193  if(level>0){
4194  level= (bias + level)>>QMAT_SHIFT;
4195  block[j]= level;
4196  }else{
4197  level= (bias - level)>>QMAT_SHIFT;
4198  block[j]= -level;
4199  }
4200  max |=level;
4201  }else{
4202  block[j]=0;
4203  }
4204  }
4205  *overflow= s->max_qcoeff < max; //overflow might have happened
4206 
4207  /* we need this permutation so that we correct the IDCT; we only permute the nonzero elements */
4208  if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4209  ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4210 
4211  return last_non_zero;
4212 }
4213 
4214 #define OFFSET(x) offsetof(MpegEncContext, x)
4215 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4216 static const AVOption h263_options[] = {
4217  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4218  { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4219  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4220  FF_MPV_COMMON_OPTS
4221  { NULL },
4222 };
4223 
4224 static const AVClass h263_class = {
4225  .class_name = "H.263 encoder",
4226  .item_name = av_default_item_name,
4227  .option = h263_options,
4228  .version = LIBAVUTIL_VERSION_INT,
4229 };
4230 
4231 AVCodec ff_h263_encoder = {
4232  .name = "h263",
4233  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4234  .type = AVMEDIA_TYPE_VIDEO,
4235  .id = AV_CODEC_ID_H263,
4236  .priv_data_size = sizeof(MpegEncContext),
4237  .init = ff_MPV_encode_init,
4238  .encode2 = ff_MPV_encode_picture,
4239  .close = ff_MPV_encode_end,
4240  .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4241  .priv_class = &h263_class,
4242 };
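 /* Minimal usage sketch (not part of this file; assumes only the public
  * libavcodec/libavutil API of the same era, with illustrative values):
  *
  *     AVCodec *codec = avcodec_find_encoder_by_name("h263");
  *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
  *     ctx->width     = 352;                      // CIF, chosen for illustration
  *     ctx->height    = 288;
  *     ctx->time_base = (AVRational){1, 25};
  *     ctx->pix_fmt   = AV_PIX_FMT_YUV420P;
  *     av_opt_set(ctx->priv_data, "obmc", "1", 0); // private option declared above
  *     avcodec_open2(ctx, codec, NULL);
  *
  * The private options in h263_options live on priv_data via h263_class,
  * which is why av_opt_set() is pointed at priv_data here. */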
4243 
4244 static const AVOption h263p_options[] = {
4245  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4246  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4247  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4248  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4249  FF_MPV_COMMON_OPTS
4250  { NULL },
4251 };
4252 static const AVClass h263p_class = {
4253  .class_name = "H.263p encoder",
4254  .item_name = av_default_item_name,
4255  .option = h263p_options,
4256  .version = LIBAVUTIL_VERSION_INT,
4257 };
4258 
4259 AVCodec ff_h263p_encoder = {
4260  .name = "h263p",
4261  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4262  .type = AVMEDIA_TYPE_VIDEO,
4263  .id = AV_CODEC_ID_H263P,
4264  .priv_data_size = sizeof(MpegEncContext),
4265  .init = ff_MPV_encode_init,
4266  .encode2 = ff_MPV_encode_picture,
4267  .close = ff_MPV_encode_end,
4268  .capabilities = CODEC_CAP_SLICE_THREADS,
4269  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4270  .priv_class = &h263p_class,
4271 };
4272 
4273 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4274 
4275 AVCodec ff_msmpeg4v2_encoder = {
4276  .name = "msmpeg4v2",
4277  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4278  .type = AVMEDIA_TYPE_VIDEO,
4279  .id = AV_CODEC_ID_MSMPEG4V2,
4280  .priv_data_size = sizeof(MpegEncContext),
4281  .init = ff_MPV_encode_init,
4282  .encode2 = ff_MPV_encode_picture,
4283  .close = ff_MPV_encode_end,
4284  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4285  .priv_class = &msmpeg4v2_class,
4286 };
4287 
4288 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4289 
4290 AVCodec ff_msmpeg4v3_encoder = {
4291  .name = "msmpeg4",
4292  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4293  .type = AVMEDIA_TYPE_VIDEO,
4294  .id = AV_CODEC_ID_MSMPEG4V3,
4295  .priv_data_size = sizeof(MpegEncContext),
4296  .init = ff_MPV_encode_init,
4297  .encode2 = ff_MPV_encode_picture,
4298  .close = ff_MPV_encode_end,
4299  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4300  .priv_class = &msmpeg4v3_class,
4301 };
4302 
4303 FF_MPV_GENERIC_CLASS(wmv1)
4304 
4305 AVCodec ff_wmv1_encoder = {
4306  .name = "wmv1",
4307  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4308  .type = AVMEDIA_TYPE_VIDEO,
4309  .id = AV_CODEC_ID_WMV1,
4310  .priv_data_size = sizeof(MpegEncContext),
4311  .init = ff_MPV_encode_init,
4312  .encode2 = ff_MPV_encode_picture,
4313  .close = ff_MPV_encode_end,
4314  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4315  .priv_class = &wmv1_class,
4316 };