FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
35 #include "libavutil/timer.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "h264chroma.h"
39 #include "idctdsp.h"
40 #include "internal.h"
41 #include "mathops.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
44 #include "mjpegenc.h"
45 #include "msmpeg4.h"
46 #include "qpeldsp.h"
47 #include "thread.h"
48 #include <limits.h>
49 
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
54 };
55 
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
66 };
67 
/* MPEG-2 intra-DC scale table: constant divisor 4 for every qscale index
 * 0..127. NOTE(review): presumably one of the per-precision entries of
 * ff_mpeg2_dc_scale_table[] (declared below), selected by
 * intra_dc_precision; confirm the exact mapping at that table's users. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
78 };
79 
/* MPEG-2 intra-DC scale table: constant divisor 2 for every qscale index
 * 0..127. NOTE(review): presumably the next-higher intra_dc_precision
 * entry of ff_mpeg2_dc_scale_table[] — confirm at the table's users. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
90 };
91 
/* MPEG-2 intra-DC scale table: divisor 1 (no scaling) for every qscale
 * index 0..127. NOTE(review): presumably the highest intra_dc_precision
 * entry of ff_mpeg2_dc_scale_table[] — confirm at the table's users. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 };
103 
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
109 };
110 
112  0, 1, 2, 3, 8, 9, 16, 17,
113  10, 11, 4, 5, 6, 7, 15, 14,
114  13, 12, 19, 18, 24, 25, 32, 33,
115  26, 27, 20, 21, 22, 23, 28, 29,
116  30, 31, 34, 35, 40, 41, 48, 49,
117  42, 43, 36, 37, 38, 39, 44, 45,
118  46, 47, 50, 51, 56, 57, 58, 59,
119  52, 53, 54, 55, 60, 61, 62, 63,
120 };
121 
123  0, 8, 16, 24, 1, 9, 2, 10,
124  17, 25, 32, 40, 48, 56, 57, 49,
125  41, 33, 26, 18, 3, 11, 4, 12,
126  19, 27, 34, 42, 50, 58, 35, 43,
127  51, 59, 20, 28, 5, 13, 6, 14,
128  21, 29, 36, 44, 52, 60, 37, 45,
129  53, 61, 22, 30, 7, 15, 23, 31,
130  38, 46, 54, 62, 39, 47, 55, 63,
131 };
132 
134  int16_t *block, int n, int qscale)
135 {
136  int i, level, nCoeffs;
137  const uint16_t *quant_matrix;
138 
139  nCoeffs= s->block_last_index[n];
140 
141  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142  /* XXX: only mpeg1 */
143  quant_matrix = s->intra_matrix;
144  for(i=1;i<=nCoeffs;i++) {
145  int j= s->intra_scantable.permutated[i];
146  level = block[j];
147  if (level) {
148  if (level < 0) {
149  level = -level;
150  level = (int)(level * qscale * quant_matrix[j]) >> 3;
151  level = (level - 1) | 1;
152  level = -level;
153  } else {
154  level = (int)(level * qscale * quant_matrix[j]) >> 3;
155  level = (level - 1) | 1;
156  }
157  block[j] = level;
158  }
159  }
160 }
161 
163  int16_t *block, int n, int qscale)
164 {
165  int i, level, nCoeffs;
166  const uint16_t *quant_matrix;
167 
168  nCoeffs= s->block_last_index[n];
169 
170  quant_matrix = s->inter_matrix;
171  for(i=0; i<=nCoeffs; i++) {
172  int j= s->intra_scantable.permutated[i];
173  level = block[j];
174  if (level) {
175  if (level < 0) {
176  level = -level;
177  level = (((level << 1) + 1) * qscale *
178  ((int) (quant_matrix[j]))) >> 4;
179  level = (level - 1) | 1;
180  level = -level;
181  } else {
182  level = (((level << 1) + 1) * qscale *
183  ((int) (quant_matrix[j]))) >> 4;
184  level = (level - 1) | 1;
185  }
186  block[j] = level;
187  }
188  }
189 }
190 
192  int16_t *block, int n, int qscale)
193 {
194  int i, level, nCoeffs;
195  const uint16_t *quant_matrix;
196 
197  if(s->alternate_scan) nCoeffs= 63;
198  else nCoeffs= s->block_last_index[n];
199 
200  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201  quant_matrix = s->intra_matrix;
202  for(i=1;i<=nCoeffs;i++) {
203  int j= s->intra_scantable.permutated[i];
204  level = block[j];
205  if (level) {
206  if (level < 0) {
207  level = -level;
208  level = (int)(level * qscale * quant_matrix[j]) >> 3;
209  level = -level;
210  } else {
211  level = (int)(level * qscale * quant_matrix[j]) >> 3;
212  }
213  block[j] = level;
214  }
215  }
216 }
217 
219  int16_t *block, int n, int qscale)
220 {
221  int i, level, nCoeffs;
222  const uint16_t *quant_matrix;
223  int sum=-1;
224 
225  if(s->alternate_scan) nCoeffs= 63;
226  else nCoeffs= s->block_last_index[n];
227 
228  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229  sum += block[0];
230  quant_matrix = s->intra_matrix;
231  for(i=1;i<=nCoeffs;i++) {
232  int j= s->intra_scantable.permutated[i];
233  level = block[j];
234  if (level) {
235  if (level < 0) {
236  level = -level;
237  level = (int)(level * qscale * quant_matrix[j]) >> 3;
238  level = -level;
239  } else {
240  level = (int)(level * qscale * quant_matrix[j]) >> 3;
241  }
242  block[j] = level;
243  sum+=level;
244  }
245  }
246  block[63]^=sum&1;
247 }
248 
250  int16_t *block, int n, int qscale)
251 {
252  int i, level, nCoeffs;
253  const uint16_t *quant_matrix;
254  int sum=-1;
255 
256  if(s->alternate_scan) nCoeffs= 63;
257  else nCoeffs= s->block_last_index[n];
258 
259  quant_matrix = s->inter_matrix;
260  for(i=0; i<=nCoeffs; i++) {
261  int j= s->intra_scantable.permutated[i];
262  level = block[j];
263  if (level) {
264  if (level < 0) {
265  level = -level;
266  level = (((level << 1) + 1) * qscale *
267  ((int) (quant_matrix[j]))) >> 4;
268  level = -level;
269  } else {
270  level = (((level << 1) + 1) * qscale *
271  ((int) (quant_matrix[j]))) >> 4;
272  }
273  block[j] = level;
274  sum+=level;
275  }
276  }
277  block[63]^=sum&1;
278 }
279 
281  int16_t *block, int n, int qscale)
282 {
283  int i, level, qmul, qadd;
284  int nCoeffs;
285 
286  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
287 
288  qmul = qscale << 1;
289 
290  if (!s->h263_aic) {
291  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292  qadd = (qscale - 1) | 1;
293  }else{
294  qadd = 0;
295  }
296  if(s->ac_pred)
297  nCoeffs=63;
298  else
299  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 
301  for(i=1; i<=nCoeffs; i++) {
302  level = block[i];
303  if (level) {
304  if (level < 0) {
305  level = level * qmul - qadd;
306  } else {
307  level = level * qmul + qadd;
308  }
309  block[i] = level;
310  }
311  }
312 }
313 
315  int16_t *block, int n, int qscale)
316 {
317  int i, level, qmul, qadd;
318  int nCoeffs;
319 
320  av_assert2(s->block_last_index[n]>=0);
321 
322  qadd = (qscale - 1) | 1;
323  qmul = qscale << 1;
324 
325  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 
327  for(i=0; i<=nCoeffs; i++) {
328  level = block[i];
329  if (level) {
330  if (level < 0) {
331  level = level * qmul - qadd;
332  } else {
333  level = level * qmul + qadd;
334  }
335  block[i] = level;
336  }
337  }
338 }
339 
/**
 * Error-resilience callback: reconstruct one macroblock during error
 * concealment.
 *
 * @param opaque      the MpegEncContext, passed back through the ER layer
 * @param ref         reference selector; nonzero only logs that interlaced
 *                    concealment is incomplete (see below)
 * @param mv          concealment motion vectors, copied into s->mv
 * @param mb_x, mb_y  macroblock position used to derive s->dest[]
 * NOTE(review): two original source lines (354-355) are missing from this
 * extraction between the memcpy and clear_blocks calls.
 */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341  int (*mv)[2][4][2],
342  int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 {
344  MpegEncContext *s = opaque;
345 
 /* Load the concealment parameters into the context so the normal MB
  * reconstruction path below can be reused unchanged. */
346  s->mv_dir = mv_dir;
347  s->mv_type = mv_type;
348  s->mb_intra = mb_intra;
349  s->mb_skipped = mb_skipped;
350  s->mb_x = mb_x;
351  s->mb_y = mb_y;
352  memcpy(s->mv, mv, sizeof(*mv));
353 
356 
357  s->bdsp.clear_blocks(s->block[0]);
358 
 /* Destination pointers for the 16x16 luma MB and the two chroma planes,
  * scaled by the chroma subsampling shifts. */
359  s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360  s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361  s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
362 
363  if (ref)
364  av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
365  ff_mpv_decode_mb(s, s->block);
366 }
367 
/**
 * Overwrite a 16-pixel-wide block with mid-gray (value 128).
 * Debug helper installed into the hpeldsp put/avg tables when
 * FF_DEBUG_NOMC is set; the source pixels are deliberately ignored.
 */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
373 
/**
 * Overwrite an 8-pixel-wide block with mid-gray (value 128).
 * Chroma/halfpel counterpart of gray16(), also used only with
 * FF_DEBUG_NOMC; the source pixels are deliberately ignored.
 */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
379 
380 /* init common dct for both encoder and decoder */
382 {
383  ff_blockdsp_init(&s->bdsp, s->avctx);
384  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386  ff_me_cmp_init(&s->mecc, s->avctx);
389 
390  if (s->avctx->debug & FF_DEBUG_NOMC) {
391  int i;
392  for (i=0; i<4; i++) {
393  s->hdsp.avg_pixels_tab[0][i] = gray16;
394  s->hdsp.put_pixels_tab[0][i] = gray16;
395  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
396 
397  s->hdsp.avg_pixels_tab[1][i] = gray8;
398  s->hdsp.put_pixels_tab[1][i] = gray8;
399  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
400  }
401  }
402 
408  if (s->flags & CODEC_FLAG_BITEXACT)
411 
412  if (HAVE_INTRINSICS_NEON)
414 
415  if (ARCH_ALPHA)
417  if (ARCH_ARM)
419  if (ARCH_PPC)
421  if (ARCH_X86)
423 
424  return 0;
425 }
426 
428 {
429  ff_idctdsp_init(&s->idsp, s->avctx);
430 
431  /* load & permutate scantables
432  * note: only wmv uses different ones
433  */
434  if (s->alternate_scan) {
437  } else {
440  }
443 }
444 
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads) shared by one slice context.
 *
 * @param linesize luma stride the buffers must accommodate (may be negative;
 *                 FFABS is applied before sizing)
 * @return 0 on success (or when allocation is skipped), AVERROR_PATCHWELCOME
 *         for unusably small strides, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the condition guarding the early "return 0" (original line
 * 449) and the cleanup statement under "fail:" (original line 475) are
 * missing from this extraction.
 */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
446 {
 /* Round the stride up with 64 bytes of slack, 32-byte aligned. */
447  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
448 
450  return 0;
451 
452  if (linesize < 24) {
453  av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454  return AVERROR_PATCHWELCOME;
455  }
456 
457  // edge emu needs blocksize + filter length - 1
458  // (= 17x17 for halfpel / 21x21 for h264)
459  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460  // at uvlinesize. It supports only YUV420 so 24x24 is enough
461  // linesize * interlaced * MBsize
462  // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
464  fail);
465 
 /* One shared scratch allocation; the remaining pointers alias into it
  * rather than owning separate memory. */
466  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
467  fail)
468  s->me.temp = s->me.scratchpad;
469  s->rd_scratchpad = s->me.scratchpad;
470  s->b_scratchpad = s->me.scratchpad;
471  s->obmc_scratchpad = s->me.scratchpad + 16;
472 
473  return 0;
474 fail:
476  return AVERROR(ENOMEM);
477 }
478 
479 /**
480  * Allocate a frame buffer
481  */
483 {
484  int edges_needed = av_codec_is_encoder(s->avctx->codec);
485  int r, ret;
486 
487  pic->tf.f = pic->f;
488  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
490  s->codec_id != AV_CODEC_ID_MSS2) {
491  if (edges_needed) {
492  pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493  pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
494  }
495 
496  r = ff_thread_get_buffer(s->avctx, &pic->tf,
497  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
498  } else {
499  pic->f->width = s->avctx->width;
500  pic->f->height = s->avctx->height;
501  pic->f->format = s->avctx->pix_fmt;
502  r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
503  }
504 
505  if (r < 0 || !pic->f->buf[0]) {
506  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
507  r, pic->f->data[0]);
508  return -1;
509  }
510 
511  if (edges_needed) {
512  int i;
513  for (i = 0; pic->f->data[i]; i++) {
514  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515  pic->f->linesize[i] +
516  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517  pic->f->data[i] += offset;
518  }
519  pic->f->width = s->avctx->width;
520  pic->f->height = s->avctx->height;
521  }
522 
523  if (s->avctx->hwaccel) {
524  assert(!pic->hwaccel_picture_private);
527  if (!pic->hwaccel_priv_buf) {
528  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
529  return -1;
530  }
532  }
533  }
534 
535  if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536  s->uvlinesize != pic->f->linesize[1])) {
538  "get_buffer() failed (stride changed)\n");
539  ff_mpeg_unref_picture(s, pic);
540  return -1;
541  }
542 
543  if (pic->f->linesize[1] != pic->f->linesize[2]) {
545  "get_buffer() failed (uv stride mismatch)\n");
546  ff_mpeg_unref_picture(s, pic);
547  return -1;
548  }
549 
550  if (!s->edge_emu_buffer &&
551  (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
553  "get_buffer() failed to allocate context scratch buffers.\n");
554  ff_mpeg_unref_picture(s, pic);
555  return ret;
556  }
557 
558  return 0;
559 }
560 
562 {
563  int i;
564 
565  pic->alloc_mb_width =
566  pic->alloc_mb_height = 0;
567 
574 
575  for (i = 0; i < 2; i++) {
577  av_buffer_unref(&pic->ref_index_buf[i]);
578  }
579 }
580 
582 {
583  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584  const int mb_array_size = s->mb_stride * s->mb_height;
585  const int b8_array_size = s->b8_stride * s->mb_height * 2;
586  int i;
587 
588 
589  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
592  sizeof(uint32_t));
593  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594  return AVERROR(ENOMEM);
595 
596  if (s->encoding) {
597  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601  return AVERROR(ENOMEM);
602  }
603 
604  if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
606  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607  int ref_index_size = 4 * mb_array_size;
608 
609  for (i = 0; mv_size && i < 2; i++) {
610  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613  return AVERROR(ENOMEM);
614  }
615  }
616 
617  pic->alloc_mb_width = s->mb_width;
618  pic->alloc_mb_height = s->mb_height;
619 
620  return 0;
621 }
622 
624 {
625  int ret, i;
626 #define MAKE_WRITABLE(table) \
627 do {\
628  if (pic->table &&\
629  (ret = av_buffer_make_writable(&pic->table)) < 0)\
630  return ret;\
631 } while (0)
632 
633  MAKE_WRITABLE(mb_var_buf);
634  MAKE_WRITABLE(mc_mb_var_buf);
635  MAKE_WRITABLE(mb_mean_buf);
636  MAKE_WRITABLE(mbskip_table_buf);
637  MAKE_WRITABLE(qscale_table_buf);
638  MAKE_WRITABLE(mb_type_buf);
639 
640  for (i = 0; i < 2; i++) {
641  MAKE_WRITABLE(motion_val_buf[i]);
642  MAKE_WRITABLE(ref_index_buf[i]);
643  }
644 
645  return 0;
646 }
647 
648 /**
649  * Allocate a Picture.
650  * The pixels are allocated/set by calling get_buffer() if shared = 0
651  */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
653 {
654  int i, ret;
655 
 /* If per-MB tables exist but were sized for different dimensions, the
  * original code disposes of them here so they are reallocated below.
  * NOTE(review): the statement at original line 659 is missing from this
  * extraction — presumably a table-free call; confirm against upstream. */
656  if (pic->qscale_table_buf)
657  if ( pic->alloc_mb_width != s->mb_width
658  || pic->alloc_mb_height != s->mb_height)
660 
661  if (shared) {
 /* Caller supplied the pixel buffers; just mark the picture shared. */
662  av_assert0(pic->f->data[0]);
663  pic->shared = 1;
664  } else {
665  av_assert0(!pic->f->buf[0]);
666 
667  if (alloc_frame_buffer(s, pic) < 0)
668  return -1;
669 
 /* Remember the strides chosen by the buffer allocator; later pictures
  * are validated against these. */
670  s->linesize = pic->f->linesize[0];
671  s->uvlinesize = pic->f->linesize[1];
672  }
673 
 /* Allocate fresh per-MB tables, or make existing (possibly ref-shared)
  * ones writable for this picture. */
674  if (!pic->qscale_table_buf)
675  ret = alloc_picture_tables(s, pic);
676  else
677  ret = make_tables_writable(pic);
678  if (ret < 0)
679  goto fail;
680 
 /* Encoder-only statistics tables. */
681  if (s->encoding) {
682  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684  pic->mb_mean = pic->mb_mean_buf->data;
685  }
686 
 /* Point the convenience pointers into the buffers; the
  * "+ 2 * s->mb_stride + 1" skips the guard rows/column so (0,0) maps to
  * the first real macroblock. */
687  pic->mbskip_table = pic->mbskip_table_buf->data;
688  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
690 
691  if (pic->motion_val_buf[0]) {
692  for (i = 0; i < 2; i++) {
693  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694  pic->ref_index[i] = pic->ref_index_buf[i]->data;
695  }
696  }
697 
698  return 0;
699 fail:
 /* NOTE(review): original line 702 is missing from this extraction. */
700  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701  ff_mpeg_unref_picture(s, pic);
703  return AVERROR(ENOMEM);
704 }
705 
706 /**
707  * Deallocate a picture.
708  */
710 {
711  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
712 
713  pic->tf.f = pic->f;
714  /* WM Image / Screen codecs allocate internal buffers with different
715  * dimensions / colorspaces; ignore user-defined callbacks for these. */
716  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
719  ff_thread_release_buffer(s->avctx, &pic->tf);
720  else if (pic->f)
721  av_frame_unref(pic->f);
722 
724 
725  if (pic->needs_realloc)
727 
728  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
729 }
730 
732 {
733  int i;
734 
735 #define UPDATE_TABLE(table)\
736 do {\
737  if (src->table &&\
738  (!dst->table || dst->table->buffer != src->table->buffer)) {\
739  av_buffer_unref(&dst->table);\
740  dst->table = av_buffer_ref(src->table);\
741  if (!dst->table) {\
742  ff_free_picture_tables(dst);\
743  return AVERROR(ENOMEM);\
744  }\
745  }\
746 } while (0)
747 
748  UPDATE_TABLE(mb_var_buf);
749  UPDATE_TABLE(mc_mb_var_buf);
750  UPDATE_TABLE(mb_mean_buf);
751  UPDATE_TABLE(mbskip_table_buf);
752  UPDATE_TABLE(qscale_table_buf);
753  UPDATE_TABLE(mb_type_buf);
754  for (i = 0; i < 2; i++) {
755  UPDATE_TABLE(motion_val_buf[i]);
756  UPDATE_TABLE(ref_index_buf[i]);
757  }
758 
759  dst->mb_var = src->mb_var;
760  dst->mc_mb_var = src->mc_mb_var;
761  dst->mb_mean = src->mb_mean;
762  dst->mbskip_table = src->mbskip_table;
763  dst->qscale_table = src->qscale_table;
764  dst->mb_type = src->mb_type;
765  for (i = 0; i < 2; i++) {
766  dst->motion_val[i] = src->motion_val[i];
767  dst->ref_index[i] = src->ref_index[i];
768  }
769 
770  dst->alloc_mb_width = src->alloc_mb_width;
771  dst->alloc_mb_height = src->alloc_mb_height;
772 
773  return 0;
774 }
775 
777 {
778  int ret;
779 
780  av_assert0(!dst->f->buf[0]);
781  av_assert0(src->f->buf[0]);
782 
783  src->tf.f = src->f;
784  dst->tf.f = dst->f;
785  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
786  if (ret < 0)
787  goto fail;
788 
789  ret = update_picture_tables(dst, src);
790  if (ret < 0)
791  goto fail;
792 
793  if (src->hwaccel_picture_private) {
795  if (!dst->hwaccel_priv_buf)
796  goto fail;
798  }
799 
800  dst->field_picture = src->field_picture;
801  dst->mb_var_sum = src->mb_var_sum;
802  dst->mc_mb_var_sum = src->mc_mb_var_sum;
803  dst->b_frame_score = src->b_frame_score;
804  dst->needs_realloc = src->needs_realloc;
805  dst->reference = src->reference;
806  dst->shared = src->shared;
807 
808  return 0;
809 fail:
810  ff_mpeg_unref_picture(s, dst);
811  return ret;
812 }
813 
815 {
816  int16_t (*tmp)[64];
817 
818  tmp = s->pblocks[4];
819  s->pblocks[4] = s->pblocks[5];
820  s->pblocks[5] = tmp;
821 }
822 
824 {
825  int y_size = s->b8_stride * (2 * s->mb_height + 1);
826  int c_size = s->mb_stride * (s->mb_height + 1);
827  int yc_size = y_size + 2 * c_size;
828  int i;
829 
830  if (s->mb_height & 1)
831  yc_size += 2*s->b8_stride + 2*s->mb_stride;
832 
833  s->edge_emu_buffer =
834  s->me.scratchpad =
835  s->me.temp =
836  s->rd_scratchpad =
837  s->b_scratchpad =
838  s->obmc_scratchpad = NULL;
839 
840  if (s->encoding) {
841  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842  ME_MAP_SIZE * sizeof(uint32_t), fail)
844  ME_MAP_SIZE * sizeof(uint32_t), fail)
845  if (s->avctx->noise_reduction) {
847  2 * 64 * sizeof(int), fail)
848  }
849  }
850  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851  s->block = s->blocks[0];
852 
853  for (i = 0; i < 12; i++) {
854  s->pblocks[i] = &s->block[i];
855  }
856  if (s->avctx->codec_tag == AV_RL32("VCR2"))
857  exchange_uv(s);
858 
859  if (s->out_format == FMT_H263) {
860  /* ac values */
862  yc_size * sizeof(int16_t) * 16, fail);
863  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865  s->ac_val[2] = s->ac_val[1] + c_size;
866  }
867 
868  return 0;
869 fail:
870  return -1; // free() through ff_mpv_common_end()
871 }
872 
874 {
875  if (!s)
876  return;
877 
879  av_freep(&s->me.scratchpad);
880  s->me.temp =
881  s->rd_scratchpad =
882  s->b_scratchpad =
883  s->obmc_scratchpad = NULL;
884 
885  av_freep(&s->dct_error_sum);
886  av_freep(&s->me.map);
887  av_freep(&s->me.score_map);
888  av_freep(&s->blocks);
889  av_freep(&s->ac_val_base);
890  s->block = NULL;
891 }
892 
894 {
895 #define COPY(a) bak->a = src->a
896  COPY(edge_emu_buffer);
897  COPY(me.scratchpad);
898  COPY(me.temp);
899  COPY(rd_scratchpad);
900  COPY(b_scratchpad);
901  COPY(obmc_scratchpad);
902  COPY(me.map);
903  COPY(me.score_map);
904  COPY(blocks);
905  COPY(block);
906  COPY(start_mb_y);
907  COPY(end_mb_y);
908  COPY(me.map_generation);
909  COPY(pb);
910  COPY(dct_error_sum);
911  COPY(dct_count[0]);
912  COPY(dct_count[1]);
913  COPY(ac_val_base);
914  COPY(ac_val[0]);
915  COPY(ac_val[1]);
916  COPY(ac_val[2]);
917 #undef COPY
918 }
919 
921 {
922  MpegEncContext bak;
923  int i, ret;
924  // FIXME copy only needed parts
925  // START_TIMER
926  backup_duplicate_context(&bak, dst);
927  memcpy(dst, src, sizeof(MpegEncContext));
928  backup_duplicate_context(dst, &bak);
929  for (i = 0; i < 12; i++) {
930  dst->pblocks[i] = &dst->block[i];
931  }
932  if (dst->avctx->codec_tag == AV_RL32("VCR2"))
933  exchange_uv(dst);
934  if (!dst->edge_emu_buffer &&
935  (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937  "scratch buffers.\n");
938  return ret;
939  }
940  // STOP_TIMER("update_duplicate_context")
941  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
942  return 0;
943 }
944 
946  const AVCodecContext *src)
947 {
948  int i, ret;
949  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
950 
951  if (dst == src)
952  return 0;
953 
954  av_assert0(s != s1);
955 
956  // FIXME can parameters change on I-frames?
957  // in that case dst may need a reinit
958  if (!s->context_initialized) {
959  memcpy(s, s1, sizeof(MpegEncContext));
960 
961  s->avctx = dst;
962  s->bitstream_buffer = NULL;
964 
965  if (s1->context_initialized){
966 // s->picture_range_start += MAX_PICTURE_COUNT;
967 // s->picture_range_end += MAX_PICTURE_COUNT;
968  ff_mpv_idct_init(s);
969  if((ret = ff_mpv_common_init(s)) < 0){
970  memset(s, 0, sizeof(MpegEncContext));
971  s->avctx = dst;
972  return ret;
973  }
974  }
975  }
976 
977  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
978  s->context_reinit = 0;
979  s->height = s1->height;
980  s->width = s1->width;
981  if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
982  return ret;
983  }
984 
985  s->avctx->coded_height = s1->avctx->coded_height;
986  s->avctx->coded_width = s1->avctx->coded_width;
987  s->avctx->width = s1->avctx->width;
988  s->avctx->height = s1->avctx->height;
989 
990  s->coded_picture_number = s1->coded_picture_number;
991  s->picture_number = s1->picture_number;
992 
993  av_assert0(!s->picture || s->picture != s1->picture);
994  if(s->picture)
995  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
996  ff_mpeg_unref_picture(s, &s->picture[i]);
997  if (s1->picture[i].f->buf[0] &&
998  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
999  return ret;
1000  }
1001 
1002 #define UPDATE_PICTURE(pic)\
1003 do {\
1004  ff_mpeg_unref_picture(s, &s->pic);\
1005  if (s1->pic.f && s1->pic.f->buf[0])\
1006  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1007  else\
1008  ret = update_picture_tables(&s->pic, &s1->pic);\
1009  if (ret < 0)\
1010  return ret;\
1011 } while (0)
1012 
1013  UPDATE_PICTURE(current_picture);
1014  UPDATE_PICTURE(last_picture);
1015  UPDATE_PICTURE(next_picture);
1016 
1017  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1018  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1019  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1020 
1021  // Error/bug resilience
1022  s->next_p_frame_damaged = s1->next_p_frame_damaged;
1023  s->workaround_bugs = s1->workaround_bugs;
1024  s->padding_bug_score = s1->padding_bug_score;
1025 
1026  // MPEG4 timing info
1027  memcpy(&s->last_time_base, &s1->last_time_base,
1028  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1029  (char *) &s1->last_time_base);
1030 
1031  // B-frame info
1032  s->max_b_frames = s1->max_b_frames;
1033  s->low_delay = s1->low_delay;
1034  s->droppable = s1->droppable;
1035 
1036  // DivX handling (doesn't work)
1037  s->divx_packed = s1->divx_packed;
1038 
1039  if (s1->bitstream_buffer) {
1040  if (s1->bitstream_buffer_size +
1044  s1->allocated_bitstream_buffer_size);
1045  s->bitstream_buffer_size = s1->bitstream_buffer_size;
1046  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1047  s1->bitstream_buffer_size);
1048  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1050  }
1051 
1052  // linesize dependend scratch buffer allocation
1053  if (!s->edge_emu_buffer)
1054  if (s1->linesize) {
1055  if (frame_size_alloc(s, s1->linesize) < 0) {
1056  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1057  "scratch buffers.\n");
1058  return AVERROR(ENOMEM);
1059  }
1060  } else {
1061  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1062  "be allocated due to unknown size.\n");
1063  }
1064 
1065  // MPEG2/interlacing info
1066  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1067  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1068 
1069  if (!s1->first_field) {
1070  s->last_pict_type = s1->pict_type;
1071  if (s1->current_picture_ptr)
1072  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1073  }
1074 
1075  return 0;
1076 }
1077 
1078 /**
1079  * Set the given MpegEncContext to common defaults
1080  * (same for encoding and decoding).
1081  * The changed fields will not depend upon the
1082  * prior state of the MpegEncContext.
1083  */
1085 {
1086  s->y_dc_scale_table =
1089  s->progressive_frame = 1;
1090  s->progressive_sequence = 1;
1092 
1093  s->coded_picture_number = 0;
1094  s->picture_number = 0;
1095 
1096  s->f_code = 1;
1097  s->b_code = 1;
1098 
1099  s->slice_context_count = 1;
1100 }
1101 
1102 /**
1103  * Set the given MpegEncContext to defaults for decoding.
1104  * the changed fields will not depend upon
1105  * the prior state of the MpegEncContext.
1106  */
1108 {
1110 }
1111 
1113 {
1114  ERContext *er = &s->er;
1115  int mb_array_size = s->mb_height * s->mb_stride;
1116  int i;
1117 
1118  er->avctx = s->avctx;
1119  er->mecc = &s->mecc;
1120 
1121  er->mb_index2xy = s->mb_index2xy;
1122  er->mb_num = s->mb_num;
1123  er->mb_width = s->mb_width;
1124  er->mb_height = s->mb_height;
1125  er->mb_stride = s->mb_stride;
1126  er->b8_stride = s->b8_stride;
1127 
1129  er->error_status_table = av_mallocz(mb_array_size);
1130  if (!er->er_temp_buffer || !er->error_status_table)
1131  goto fail;
1132 
1133  er->mbskip_table = s->mbskip_table;
1134  er->mbintra_table = s->mbintra_table;
1135 
1136  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1137  er->dc_val[i] = s->dc_val[i];
1138 
1140  er->opaque = s;
1141 
1142  return 0;
1143 fail:
1144  av_freep(&er->er_temp_buffer);
1146  return AVERROR(ENOMEM);
1147 }
1148 
1149 /**
1150  * Initialize and allocates MpegEncContext fields dependent on the resolution.
1151  */
1153 {
1154  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1155 
1156  s->mb_width = (s->width + 15) / 16;
1157  s->mb_stride = s->mb_width + 1;
1158  s->b8_stride = s->mb_width * 2 + 1;
1159  mb_array_size = s->mb_height * s->mb_stride;
1160  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1161 
1162  /* set default edge pos, will be overridden
1163  * in decode_header if needed */
1164  s->h_edge_pos = s->mb_width * 16;
1165  s->v_edge_pos = s->mb_height * 16;
1166 
1167  s->mb_num = s->mb_width * s->mb_height;
1168 
1169  s->block_wrap[0] =
1170  s->block_wrap[1] =
1171  s->block_wrap[2] =
1172  s->block_wrap[3] = s->b8_stride;
1173  s->block_wrap[4] =
1174  s->block_wrap[5] = s->mb_stride;
1175 
1176  y_size = s->b8_stride * (2 * s->mb_height + 1);
1177  c_size = s->mb_stride * (s->mb_height + 1);
1178  yc_size = y_size + 2 * c_size;
1179 
1180  if (s->mb_height & 1)
1181  yc_size += 2*s->b8_stride + 2*s->mb_stride;
1182 
1183  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1184  for (y = 0; y < s->mb_height; y++)
1185  for (x = 0; x < s->mb_width; x++)
1186  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1187 
1188  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1189 
1190  if (s->encoding) {
1191  /* Allocate MV tables */
1192  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1193  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1194  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1195  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1196  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1197  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1198  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1204 
1205  /* Allocate MB type table */
1206  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1207 
1208  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1209 
1211  mb_array_size * sizeof(float), fail);
1213  mb_array_size * sizeof(float), fail);
1214 
1215  }
1216 
1217  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1219  /* interlaced direct mode decoding tables */
1220  for (i = 0; i < 2; i++) {
1221  int j, k;
1222  for (j = 0; j < 2; j++) {
1223  for (k = 0; k < 2; k++) {
1225  s->b_field_mv_table_base[i][j][k],
1226  mv_table_size * 2 * sizeof(int16_t),
1227  fail);
1228  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1229  s->mb_stride + 1;
1230  }
1231  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1232  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1233  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1234  }
1235  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1236  }
1237  }
1238  if (s->out_format == FMT_H263) {
1239  /* cbp values */
1240  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1241  s->coded_block = s->coded_block_base + s->b8_stride + 1;
1242 
1243  /* cbp, ac_pred, pred_dir */
1244  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1245  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1246  }
1247 
1248  if (s->h263_pred || s->h263_plus || !s->encoding) {
1249  /* dc values */
1250  // MN: we need these for error resilience of intra-frames
1251  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1252  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1253  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1254  s->dc_val[2] = s->dc_val[1] + c_size;
1255  for (i = 0; i < yc_size; i++)
1256  s->dc_val_base[i] = 1024;
1257  }
1258 
1259  /* which mb is a intra block */
1260  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1261  memset(s->mbintra_table, 1, mb_array_size);
1262 
1263  /* init macroblock skip table */
1264  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1265  // Note the + 1 is for a quicker mpeg4 slice_end detection
1266 
1267  return init_er(s);
1268 fail:
1269  return AVERROR(ENOMEM);
1270 }
1271 
1272 /**
1273  * init common structure for both encoder and decoder.
1274  * this assumes that some variables like width/height are already set
1275  */
1277 {
1278  int i;
1279  int nb_slices = (HAVE_THREADS &&
1281  s->avctx->thread_count : 1;
1282 
1283  if (s->encoding && s->avctx->slices)
1284  nb_slices = s->avctx->slices;
1285 
1287  s->mb_height = (s->height + 31) / 32 * 2;
1288  else
1289  s->mb_height = (s->height + 15) / 16;
1290 
1291  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1293  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1294  return -1;
1295  }
1296 
1297  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1298  int max_slices;
1299  if (s->mb_height)
1300  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1301  else
1302  max_slices = MAX_THREADS;
1303  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1304  " reducing to %d\n", nb_slices, max_slices);
1305  nb_slices = max_slices;
1306  }
1307 
1308  if ((s->width || s->height) &&
1309  av_image_check_size(s->width, s->height, 0, s->avctx))
1310  return -1;
1311 
1312  dct_init(s);
1313 
1314  s->flags = s->avctx->flags;
1315  s->flags2 = s->avctx->flags2;
1316 
1317  /* set chroma shifts */
1319  &s->chroma_x_shift,
1320  &s->chroma_y_shift);
1321 
1322  /* convert fourcc to upper case */
1324 
1326 
1328  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1329  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1330  s->picture[i].f = av_frame_alloc();
1331  if (!s->picture[i].f)
1332  goto fail;
1333  }
1334  memset(&s->next_picture, 0, sizeof(s->next_picture));
1335  memset(&s->last_picture, 0, sizeof(s->last_picture));
1336  memset(&s->current_picture, 0, sizeof(s->current_picture));
1337  memset(&s->new_picture, 0, sizeof(s->new_picture));
1338  s->next_picture.f = av_frame_alloc();
1339  if (!s->next_picture.f)
1340  goto fail;
1341  s->last_picture.f = av_frame_alloc();
1342  if (!s->last_picture.f)
1343  goto fail;
1345  if (!s->current_picture.f)
1346  goto fail;
1347  s->new_picture.f = av_frame_alloc();
1348  if (!s->new_picture.f)
1349  goto fail;
1350 
1351  if (init_context_frame(s))
1352  goto fail;
1353 
1354  s->parse_context.state = -1;
1355 
1356  s->context_initialized = 1;
1357  s->thread_context[0] = s;
1358 
1359 // if (s->width && s->height) {
1360  if (nb_slices > 1) {
1361  for (i = 1; i < nb_slices; i++) {
1362  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1363  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1364  }
1365 
1366  for (i = 0; i < nb_slices; i++) {
1367  if (init_duplicate_context(s->thread_context[i]) < 0)
1368  goto fail;
1369  s->thread_context[i]->start_mb_y =
1370  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1371  s->thread_context[i]->end_mb_y =
1372  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1373  }
1374  } else {
1375  if (init_duplicate_context(s) < 0)
1376  goto fail;
1377  s->start_mb_y = 0;
1378  s->end_mb_y = s->mb_height;
1379  }
1380  s->slice_context_count = nb_slices;
1381 // }
1382 
1383  return 0;
1384  fail:
1385  ff_mpv_common_end(s);
1386  return -1;
1387 }
1388 
1389 /**
1390  * Frees and resets MpegEncContext fields depending on the resolution.
1391  * Is used during resolution changes to avoid a full reinitialization of the
1392  * codec.
1393  */
1395 {
1396  int i, j, k;
1397 
1398  av_freep(&s->mb_type);
1405  s->p_mv_table = NULL;
1406  s->b_forw_mv_table = NULL;
1407  s->b_back_mv_table = NULL;
1408  s->b_bidir_forw_mv_table = NULL;
1409  s->b_bidir_back_mv_table = NULL;
1410  s->b_direct_mv_table = NULL;
1411  for (i = 0; i < 2; i++) {
1412  for (j = 0; j < 2; j++) {
1413  for (k = 0; k < 2; k++) {
1414  av_freep(&s->b_field_mv_table_base[i][j][k]);
1415  s->b_field_mv_table[i][j][k] = NULL;
1416  }
1417  av_freep(&s->b_field_select_table[i][j]);
1418  av_freep(&s->p_field_mv_table_base[i][j]);
1419  s->p_field_mv_table[i][j] = NULL;
1420  }
1422  }
1423 
1424  av_freep(&s->dc_val_base);
1426  av_freep(&s->mbintra_table);
1427  av_freep(&s->cbp_table);
1428  av_freep(&s->pred_dir_table);
1429 
1430  av_freep(&s->mbskip_table);
1431 
1433  av_freep(&s->er.er_temp_buffer);
1434  av_freep(&s->mb_index2xy);
1435  av_freep(&s->lambda_table);
1436 
1437  av_freep(&s->cplx_tab);
1438  av_freep(&s->bits_tab);
1439 
1440  s->linesize = s->uvlinesize = 0;
1441 
1442  return 0;
1443 }
1444 
1446 {
1447  int i, err = 0;
1448 
1449  if (s->slice_context_count > 1) {
1450  for (i = 0; i < s->slice_context_count; i++) {
1452  }
1453  for (i = 1; i < s->slice_context_count; i++) {
1454  av_freep(&s->thread_context[i]);
1455  }
1456  } else
1458 
1459  if ((err = free_context_frame(s)) < 0)
1460  return err;
1461 
1462  if (s->picture)
1463  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1464  s->picture[i].needs_realloc = 1;
1465  }
1466 
1467  s->last_picture_ptr =
1468  s->next_picture_ptr =
1469  s->current_picture_ptr = NULL;
1470 
1471  // init
1473  s->mb_height = (s->height + 31) / 32 * 2;
1474  else
1475  s->mb_height = (s->height + 15) / 16;
1476 
1477  if ((s->width || s->height) &&
1478  av_image_check_size(s->width, s->height, 0, s->avctx))
1479  return AVERROR_INVALIDDATA;
1480 
1481  if ((err = init_context_frame(s)))
1482  goto fail;
1483 
1484  s->thread_context[0] = s;
1485 
1486  if (s->width && s->height) {
1487  int nb_slices = s->slice_context_count;
1488  if (nb_slices > 1) {
1489  for (i = 1; i < nb_slices; i++) {
1490  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1491  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1492  }
1493 
1494  for (i = 0; i < nb_slices; i++) {
1495  if (init_duplicate_context(s->thread_context[i]) < 0)
1496  goto fail;
1497  s->thread_context[i]->start_mb_y =
1498  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1499  s->thread_context[i]->end_mb_y =
1500  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1501  }
1502  } else {
1503  err = init_duplicate_context(s);
1504  if (err < 0)
1505  goto fail;
1506  s->start_mb_y = 0;
1507  s->end_mb_y = s->mb_height;
1508  }
1509  s->slice_context_count = nb_slices;
1510  }
1511 
1512  return 0;
1513  fail:
1514  ff_mpv_common_end(s);
1515  return err;
1516 }
1517 
1518 /* init common structure for both encoder and decoder */
1520 {
1521  int i;
1522 
1523  if (s->slice_context_count > 1) {
1524  for (i = 0; i < s->slice_context_count; i++) {
1526  }
1527  for (i = 1; i < s->slice_context_count; i++) {
1528  av_freep(&s->thread_context[i]);
1529  }
1530  s->slice_context_count = 1;
1531  } else free_duplicate_context(s);
1532 
1534  s->parse_context.buffer_size = 0;
1535 
1538 
1539  if (s->picture) {
1540  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1542  ff_mpeg_unref_picture(s, &s->picture[i]);
1543  av_frame_free(&s->picture[i].f);
1544  }
1545  }
1546  av_freep(&s->picture);
1559 
1560  free_context_frame(s);
1561 
1562  s->context_initialized = 0;
1563  s->last_picture_ptr =
1564  s->next_picture_ptr =
1565  s->current_picture_ptr = NULL;
1566  s->linesize = s->uvlinesize = 0;
1567 }
1568 
1570  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1571 {
1572  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1573  uint8_t index_run[MAX_RUN + 1];
1574  int last, run, level, start, end, i;
1575 
1576  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1577  if (static_store && rl->max_level[0])
1578  return;
1579 
1580  /* compute max_level[], max_run[] and index_run[] */
1581  for (last = 0; last < 2; last++) {
1582  if (last == 0) {
1583  start = 0;
1584  end = rl->last;
1585  } else {
1586  start = rl->last;
1587  end = rl->n;
1588  }
1589 
1590  memset(max_level, 0, MAX_RUN + 1);
1591  memset(max_run, 0, MAX_LEVEL + 1);
1592  memset(index_run, rl->n, MAX_RUN + 1);
1593  for (i = start; i < end; i++) {
1594  run = rl->table_run[i];
1595  level = rl->table_level[i];
1596  if (index_run[run] == rl->n)
1597  index_run[run] = i;
1598  if (level > max_level[run])
1599  max_level[run] = level;
1600  if (run > max_run[level])
1601  max_run[level] = run;
1602  }
1603  if (static_store)
1604  rl->max_level[last] = static_store[last];
1605  else
1606  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1607  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1608  if (static_store)
1609  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1610  else
1611  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1612  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1613  if (static_store)
1614  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1615  else
1616  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1617  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1618  }
1619 }
1620 
/* Build the per-quantizer RL VLC lookup tables: for every qscale value
 * 0..31 this combines the VLC decode table with pre-dequantized levels,
 * so a decoder can fetch (len, run, level) with a single table lookup.
 * NOTE(review): the function signature line (original line 1621; upstream
 * presumably av_cold void ff_rl_init_vlc(RLTable *rl) -- confirm against
 * FFmpeg git) is missing from this extraction. */
1622 {
1623  int i, q;
1624 
1625  for (q = 0; q < 32; q++) {
 /* MPEG-style dequantization parameters: level * 2q + ((q-1)|1);
  * q == 0 is special-cased below to mean "no dequantization". */
1626  int qmul = q * 2;
1627  int qadd = (q - 1) | 1;
1628 
1629  if (q == 0) {
1630  qmul = 1;
1631  qadd = 0;
1632  }
1633  for (i = 0; i < rl->vlc.table_size; i++) {
1634  int code = rl->vlc.table[i][0];
1635  int len = rl->vlc.table[i][1];
1636  int level, run;
1637 
1638  if (len == 0) { // illegal code
1639  run = 66;
1640  level = MAX_LEVEL;
1641  } else if (len < 0) { // more bits needed
1642  run = 0;
1643  level = code;
1644  } else {
1645  if (code == rl->n) { // esc
1646  run = 66;
1647  level = 0;
1648  } else {
1649  run = rl->table_run[code] + 1;
1650  level = rl->table_level[code] * qmul + qadd;
 /* Codes at or beyond rl->last belong to the "last coefficient"
  * half of the table; the +192 offset encodes that in the run. */
1651  if (code >= rl->last) run += 192;
1652  }
1653  }
1654  rl->rl_vlc[q][i].len = len;
1655  rl->rl_vlc[q][i].level = level;
1656  rl->rl_vlc[q][i].run = run;
1657  }
1658  }
1659 }
1660 
/* Unreference every picture slot that is not currently marked as a
 * reference frame, returning its buffers to the pool.
 * NOTE(review): the function signature line (original line 1661) is
 * missing from this extraction -- confirm the exact name/prototype
 * against the upstream FFmpeg source. */
1662 {
1663  int i;
1664 
1665  /* release non reference frames */
1666  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1667  if (!s->picture[i].reference)
1668  ff_mpeg_unref_picture(s, &s->picture[i]);
1669  }
1670 }
1671 
1672 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1673 {
1674  if (pic == s->last_picture_ptr)
1675  return 0;
1676  if (!pic->f->buf[0])
1677  return 1;
1678  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1679  return 1;
1680  return 0;
1681 }
1682 
/**
 * Return the index of a reusable slot in s->picture[].
 * When shared is non-zero only a slot with no data buffer (and which is
 * not the current last picture) qualifies; otherwise pic_is_unused()
 * decides. Aborts if no slot is free -- see the comment below.
 */
1683 static int find_unused_picture(MpegEncContext *s, int shared)
1684 {
1685  int i;
1686 
1687  if (shared) {
1688  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1689  if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1690  return i;
1691  }
1692  } else {
1693  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1694  if (pic_is_unused(s, &s->picture[i]))
1695  return i;
1696  }
1697  }
1698 
 /* NOTE(review): original line 1699 -- the opening of this av_log() call
  * (presumably av_log(s->avctx, AV_LOG_FATAL, ...) upstream; confirm) --
  * is missing from this extraction. */
1700  "Internal error, picture buffer overflow\n");
1701  /* We could return -1, but the codec would crash trying to draw into a
1702  * non-existing frame anyway. This is safer than waiting for a random crash.
1703  * Also the return of this is never useful, an encoder must only allocate
1704  * as much as allowed in the specification. This has no relationship to how
1705  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1706  * enough for such valid streams).
1707  * Plus, a decoder has to check stream validity and remove frames if too
1708  * many reference frames are around. Waiting for "OOM" is not correct at
1709  * all. Similarly, missing reference frames have to be replaced by
1710  * interpolated/MC frames, anything else is a bug in the codec ...
1711  */
1712  abort();
1713  return -1;
1714 }
1715 
/* Public wrapper around find_unused_picture(): before handing a slot out,
 * clear a pending needs_realloc flag by freeing the slot's tables and
 * unreferencing it, so the caller always gets a clean slot.
 * NOTE(review): the signature line (original line 1716; upstream
 * int ff_find_unused_picture(MpegEncContext *s, int shared) -- confirm)
 * is missing from this extraction. */
1717 {
1718  int ret = find_unused_picture(s, shared);
1719 
1720  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1721  if (s->picture[ret].needs_realloc) {
1722  s->picture[ret].needs_realloc = 0;
1723  ff_free_picture_tables(&s->picture[ret]);
1724  ff_mpeg_unref_picture(s, &s->picture[ret]);
1725  }
1726  }
1727  return ret;
1728 }
1729 
1730 static void gray_frame(AVFrame *frame)
1731 {
1732  int i, h_chroma_shift, v_chroma_shift;
1733 
1734  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1735 
1736  for(i=0; i<frame->height; i++)
1737  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1738  for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1739  memset(frame->data[1] + frame->linesize[1]*i,
1740  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1741  memset(frame->data[2] + frame->linesize[2]*i,
1742  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1743  }
1744 }
1745 
1746 /**
1747  * generic function called after decoding
1748  * the header and before a frame is decoded.
1749  */
1751 {
1752  int i, ret;
1753  Picture *pic;
1754  s->mb_skipped = 0;
1755 
1756  if (!ff_thread_can_start_frame(avctx)) {
1757  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1758  return -1;
1759  }
1760 
1761  /* mark & release old frames */
1762  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1764  s->last_picture_ptr->f->buf[0]) {
1766  }
1767 
1768  /* release forgotten pictures */
1769  /* if (mpeg124/h263) */
1770  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1771  if (&s->picture[i] != s->last_picture_ptr &&
1772  &s->picture[i] != s->next_picture_ptr &&
1773  s->picture[i].reference && !s->picture[i].needs_realloc) {
1774  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1775  av_log(avctx, AV_LOG_ERROR,
1776  "releasing zombie picture\n");
1777  ff_mpeg_unref_picture(s, &s->picture[i]);
1778  }
1779  }
1780 
1782 
1784 
1785  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1786  // we already have a unused image
1787  // (maybe it was set before reading the header)
1788  pic = s->current_picture_ptr;
1789  } else {
1790  i = ff_find_unused_picture(s, 0);
1791  if (i < 0) {
1792  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1793  return i;
1794  }
1795  pic = &s->picture[i];
1796  }
1797 
1798  pic->reference = 0;
1799  if (!s->droppable) {
1800  if (s->pict_type != AV_PICTURE_TYPE_B)
1801  pic->reference = 3;
1802  }
1803 
1805 
1806  if (ff_alloc_picture(s, pic, 0) < 0)
1807  return -1;
1808 
1809  s->current_picture_ptr = pic;
1810  // FIXME use only the vars from current_pic
1812  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1814  if (s->picture_structure != PICT_FRAME)
1817  }
1821 
1823  // if (s->flags && CODEC_FLAG_QSCALE)
1824  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1826 
1827  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1828  s->current_picture_ptr)) < 0)
1829  return ret;
1830 
1831  if (s->pict_type != AV_PICTURE_TYPE_B) {
1833  if (!s->droppable)
1835  }
1836  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1838  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1839  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1840  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1841  s->pict_type, s->droppable);
1842 
1843  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1844  (s->pict_type != AV_PICTURE_TYPE_I ||
1845  s->picture_structure != PICT_FRAME)) {
1846  int h_chroma_shift, v_chroma_shift;
1848  &h_chroma_shift, &v_chroma_shift);
1850  av_log(avctx, AV_LOG_DEBUG,
1851  "allocating dummy last picture for B frame\n");
1852  else if (s->pict_type != AV_PICTURE_TYPE_I)
1853  av_log(avctx, AV_LOG_ERROR,
1854  "warning: first frame is no keyframe\n");
1855  else if (s->picture_structure != PICT_FRAME)
1856  av_log(avctx, AV_LOG_DEBUG,
1857  "allocate dummy last picture for field based first keyframe\n");
1858 
1859  /* Allocate a dummy frame */
1860  i = ff_find_unused_picture(s, 0);
1861  if (i < 0) {
1862  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1863  return i;
1864  }
1865  s->last_picture_ptr = &s->picture[i];
1866 
1867  s->last_picture_ptr->reference = 3;
1868  s->last_picture_ptr->f->key_frame = 0;
1870 
1871  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1872  s->last_picture_ptr = NULL;
1873  return -1;
1874  }
1875 
1876  if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1877  for(i=0; i<avctx->height; i++)
1878  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1879  0x80, avctx->width);
1880  for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1881  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1882  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1883  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1884  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1885  }
1886 
1888  for(i=0; i<avctx->height; i++)
1889  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1890  }
1891  }
1892 
1893  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1894  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1895  }
1896  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1897  s->pict_type == AV_PICTURE_TYPE_B) {
1898  /* Allocate a dummy frame */
1899  i = ff_find_unused_picture(s, 0);
1900  if (i < 0) {
1901  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1902  return i;
1903  }
1904  s->next_picture_ptr = &s->picture[i];
1905 
1906  s->next_picture_ptr->reference = 3;
1907  s->next_picture_ptr->f->key_frame = 0;
1909 
1910  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1911  s->next_picture_ptr = NULL;
1912  return -1;
1913  }
1914  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1915  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1916  }
1917 
1918 #if 0 // BUFREF-FIXME
1919  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1920  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1921 #endif
1922  if (s->last_picture_ptr) {
1924  if (s->last_picture_ptr->f->buf[0] &&
1925  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1926  s->last_picture_ptr)) < 0)
1927  return ret;
1928  }
1929  if (s->next_picture_ptr) {
1931  if (s->next_picture_ptr->f->buf[0] &&
1932  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1933  s->next_picture_ptr)) < 0)
1934  return ret;
1935  }
1936 
1938  s->last_picture_ptr->f->buf[0]));
1939 
1940  if (s->picture_structure!= PICT_FRAME) {
1941  int i;
1942  for (i = 0; i < 4; i++) {
1944  s->current_picture.f->data[i] +=
1945  s->current_picture.f->linesize[i];
1946  }
1947  s->current_picture.f->linesize[i] *= 2;
1948  s->last_picture.f->linesize[i] *= 2;
1949  s->next_picture.f->linesize[i] *= 2;
1950  }
1951  }
1952 
1953  s->err_recognition = avctx->err_recognition;
1954 
1955  /* set dequantizer, we can't do it during init as
1956  * it might change for mpeg4 and we can't do it in the header
1957  * decode as init is not called for mpeg4 there yet */
1958  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1961  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1964  } else {
1967  }
1968 
1969  if (s->avctx->debug & FF_DEBUG_NOMC) {
1971  }
1972 
1973  return 0;
1974 }
1975 
1976 /* called after a frame has been decoded. */
1978 {
1979  emms_c();
1980 
1981  if (s->current_picture.reference)
1983 }
1984 
1985 
1986 #if FF_API_VISMV
/**
 * Clip the segment (*sx,*sy)..(*ex,*ey) in place so that the x range
 * fits into [0, maxx]; y values are interpolated using 64-bit
 * intermediates to avoid overflow.
 * @return 1 if the segment lies entirely outside [0, maxx], else 0
 */
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
{
    /* Order the endpoints so that *sx <= *ex by exchanging the pointers
     * (equivalent to the original tail-recursive argument swap). */
    if (*sx > *ex) {
        int *t;
        t = sx; sx = ex; ex = t;
        t = sy; sy = ey; ey = t;
    }

    if (*sx < 0) {
        if (*ex < 0)
            return 1;               /* fully left of the clip window */
        *sy = *ey + (int64_t)(*sy - *ey) * *ex / (*ex - *sx);
        *sx = 0;
    }

    if (*ex > maxx) {
        if (*sx > maxx)
            return 1;               /* fully right of the clip window */
        *ey = *sy + (int64_t)(*ey - *sy) * (maxx - *sx) / (*ex - *sx);
        *ex = maxx;
    }
    return 0;
}
2007 
2008 
2009 /**
2010  * Draw a line from (ex, ey) -> (sx, sy).
2011  * @param w width of the image
2012  * @param h height of the image
2013  * @param stride stride/linesize of the image
 2014  * @param color color of the line
2015  */
2016 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2017  int w, int h, int stride, int color)
2018 {
2019  int x, y, fr, f;
2020 
2021  if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2022  return;
2023  if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2024  return;
2025 
2026  sx = av_clip(sx, 0, w - 1);
2027  sy = av_clip(sy, 0, h - 1);
2028  ex = av_clip(ex, 0, w - 1);
2029  ey = av_clip(ey, 0, h - 1);
2030 
2031  buf[sy * stride + sx] += color;
2032 
2033  if (FFABS(ex - sx) > FFABS(ey - sy)) {
2034  if (sx > ex) {
2035  FFSWAP(int, sx, ex);
2036  FFSWAP(int, sy, ey);
2037  }
2038  buf += sx + sy * stride;
2039  ex -= sx;
2040  f = ((ey - sy) << 16) / ex;
2041  for (x = 0; x <= ex; x++) {
2042  y = (x * f) >> 16;
2043  fr = (x * f) & 0xFFFF;
2044  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2045  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2046  }
2047  } else {
2048  if (sy > ey) {
2049  FFSWAP(int, sx, ex);
2050  FFSWAP(int, sy, ey);
2051  }
2052  buf += sx + sy * stride;
2053  ey -= sy;
2054  if (ey)
2055  f = ((ex - sx) << 16) / ey;
2056  else
2057  f = 0;
2058  for(y= 0; y <= ey; y++){
2059  x = (y*f) >> 16;
2060  fr = (y*f) & 0xFFFF;
2061  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2062  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2063  }
2064  }
2065 }
2066 
2067 /**
2068  * Draw an arrow from (ex, ey) -> (sx, sy).
2069  * @param w width of the image
2070  * @param h height of the image
2071  * @param stride stride/linesize of the image
2072  * @param color color of the arrow
2073  */
2074 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2075  int ey, int w, int h, int stride, int color, int tail, int direction)
2076 {
2077  int dx,dy;
2078 
2079  if (direction) {
2080  FFSWAP(int, sx, ex);
2081  FFSWAP(int, sy, ey);
2082  }
2083 
2084  sx = av_clip(sx, -100, w + 100);
2085  sy = av_clip(sy, -100, h + 100);
2086  ex = av_clip(ex, -100, w + 100);
2087  ey = av_clip(ey, -100, h + 100);
2088 
2089  dx = ex - sx;
2090  dy = ey - sy;
2091 
2092  if (dx * dx + dy * dy > 3 * 3) {
2093  int rx = dx + dy;
2094  int ry = -dx + dy;
2095  int length = ff_sqrt((rx * rx + ry * ry) << 8);
2096 
2097  // FIXME subpixel accuracy
2098  rx = ROUNDED_DIV(rx * 3 << 4, length);
2099  ry = ROUNDED_DIV(ry * 3 << 4, length);
2100 
2101  if (tail) {
2102  rx = -rx;
2103  ry = -ry;
2104  }
2105 
2106  draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2107  draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2108  }
2109  draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2110 }
2111 #endif
2112 
2113 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2114  int dst_x, int dst_y,
2115  int src_x, int src_y,
2116  int direction)
2117 {
2118  if (dst_x == src_x && dst_y == src_y)
2119  return 0;
2120  mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2121  mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2122  mb->src_x = src_x;
2123  mb->src_y = src_y;
2124  mb->dst_x = dst_x;
2125  mb->dst_y = dst_y;
2126  mb->source = direction ? 1 : -1;
2127  mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2128  return 1;
2129 }
2130 
2131 /**
2132  * Print debugging info for the given picture.
2133  */
2134 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2135  uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2136  int *low_delay,
2137  int mb_width, int mb_height, int mb_stride, int quarter_sample)
2138 {
2139  if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2140  const int shift = 1 + quarter_sample;
2141  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2142  const int mv_stride = (mb_width << mv_sample_log2) +
2143  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2144  int mb_x, mb_y, mbcount = 0;
2145 
2146  /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2147  * for the maximum number of MB (4 MB in case of IS_8x8) */
2148  AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2149  if (!mvs)
2150  return;
2151 
2152  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2153  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2154  int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2155  for (direction = 0; direction < 2; direction++) {
2156  if (!USES_LIST(mb_type, direction))
2157  continue;
2158  if (IS_8X8(mb_type)) {
2159  for (i = 0; i < 4; i++) {
2160  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2161  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2162  int xy = (mb_x * 2 + (i & 1) +
2163  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2164  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2165  int my = (motion_val[direction][xy][1] >> shift) + sy;
2166  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2167  }
2168  } else if (IS_16X8(mb_type)) {
2169  for (i = 0; i < 2; i++) {
2170  int sx = mb_x * 16 + 8;
2171  int sy = mb_y * 16 + 4 + 8 * i;
2172  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2173  int mx = (motion_val[direction][xy][0] >> shift);
2174  int my = (motion_val[direction][xy][1] >> shift);
2175 
2176  if (IS_INTERLACED(mb_type))
2177  my *= 2;
2178 
2179  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2180  }
2181  } else if (IS_8X16(mb_type)) {
2182  for (i = 0; i < 2; i++) {
2183  int sx = mb_x * 16 + 4 + 8 * i;
2184  int sy = mb_y * 16 + 8;
2185  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2186  int mx = motion_val[direction][xy][0] >> shift;
2187  int my = motion_val[direction][xy][1] >> shift;
2188 
2189  if (IS_INTERLACED(mb_type))
2190  my *= 2;
2191 
2192  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2193  }
2194  } else {
2195  int sx = mb_x * 16 + 8;
2196  int sy = mb_y * 16 + 8;
2197  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2198  int mx = (motion_val[direction][xy][0]>>shift) + sx;
2199  int my = (motion_val[direction][xy][1]>>shift) + sy;
2200  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2201  }
2202  }
2203  }
2204  }
2205 
2206  if (mbcount) {
2207  AVFrameSideData *sd;
2208 
2209  av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2211  if (!sd)
2212  return;
2213  memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2214  }
2215 
2216  av_freep(&mvs);
2217  }
2218 
2219  /* TODO: export all the following to make them accessible for users (and filters) */
2220  if (avctx->hwaccel || !mbtype_table
2222  return;
2223 
2224 
2225  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2226  int x,y;
2227 
2228  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2230  for (y = 0; y < mb_height; y++) {
2231  for (x = 0; x < mb_width; x++) {
2232  if (avctx->debug & FF_DEBUG_SKIP) {
2233  int count = mbskip_table[x + y * mb_stride];
2234  if (count > 9)
2235  count = 9;
2236  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2237  }
2238  if (avctx->debug & FF_DEBUG_QP) {
2239  av_log(avctx, AV_LOG_DEBUG, "%2d",
2240  qscale_table[x + y * mb_stride]);
2241  }
2242  if (avctx->debug & FF_DEBUG_MB_TYPE) {
2243  int mb_type = mbtype_table[x + y * mb_stride];
2244  // Type & MV direction
2245  if (IS_PCM(mb_type))
2246  av_log(avctx, AV_LOG_DEBUG, "P");
2247  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2248  av_log(avctx, AV_LOG_DEBUG, "A");
2249  else if (IS_INTRA4x4(mb_type))
2250  av_log(avctx, AV_LOG_DEBUG, "i");
2251  else if (IS_INTRA16x16(mb_type))
2252  av_log(avctx, AV_LOG_DEBUG, "I");
2253  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2254  av_log(avctx, AV_LOG_DEBUG, "d");
2255  else if (IS_DIRECT(mb_type))
2256  av_log(avctx, AV_LOG_DEBUG, "D");
2257  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2258  av_log(avctx, AV_LOG_DEBUG, "g");
2259  else if (IS_GMC(mb_type))
2260  av_log(avctx, AV_LOG_DEBUG, "G");
2261  else if (IS_SKIP(mb_type))
2262  av_log(avctx, AV_LOG_DEBUG, "S");
2263  else if (!USES_LIST(mb_type, 1))
2264  av_log(avctx, AV_LOG_DEBUG, ">");
2265  else if (!USES_LIST(mb_type, 0))
2266  av_log(avctx, AV_LOG_DEBUG, "<");
2267  else {
2268  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2269  av_log(avctx, AV_LOG_DEBUG, "X");
2270  }
2271 
2272  // segmentation
2273  if (IS_8X8(mb_type))
2274  av_log(avctx, AV_LOG_DEBUG, "+");
2275  else if (IS_16X8(mb_type))
2276  av_log(avctx, AV_LOG_DEBUG, "-");
2277  else if (IS_8X16(mb_type))
2278  av_log(avctx, AV_LOG_DEBUG, "|");
2279  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2280  av_log(avctx, AV_LOG_DEBUG, " ");
2281  else
2282  av_log(avctx, AV_LOG_DEBUG, "?");
2283 
2284 
2285  if (IS_INTERLACED(mb_type))
2286  av_log(avctx, AV_LOG_DEBUG, "=");
2287  else
2288  av_log(avctx, AV_LOG_DEBUG, " ");
2289  }
2290  }
2291  av_log(avctx, AV_LOG_DEBUG, "\n");
2292  }
2293  }
2294 
2295  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2296  (avctx->debug_mv)) {
2297  int mb_y;
2298  int i;
2299  int h_chroma_shift, v_chroma_shift, block_height;
2300 #if FF_API_VISMV
2301  const int shift = 1 + quarter_sample;
2302  uint8_t *ptr;
2303  const int width = avctx->width;
2304  const int height = avctx->height;
2305 #endif
2306  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2307  const int mv_stride = (mb_width << mv_sample_log2) +
2308  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2309 
2310  *low_delay = 0; // needed to see the vectors without trashing the buffers
2311 
2312  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2313 
2314  av_frame_make_writable(pict);
2315 
2316  pict->opaque = NULL;
2317 #if FF_API_VISMV
2318  ptr = pict->data[0];
2319 #endif
2320  block_height = 16 >> v_chroma_shift;
2321 
2322  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2323  int mb_x;
2324  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2325  const int mb_index = mb_x + mb_y * mb_stride;
2326 #if FF_API_VISMV
2327  if ((avctx->debug_mv) && motion_val[0]) {
2328  int type;
2329  for (type = 0; type < 3; type++) {
2330  int direction = 0;
2331  switch (type) {
2332  case 0:
2333  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2334  (pict->pict_type!= AV_PICTURE_TYPE_P))
2335  continue;
2336  direction = 0;
2337  break;
2338  case 1:
2339  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2340  (pict->pict_type!= AV_PICTURE_TYPE_B))
2341  continue;
2342  direction = 0;
2343  break;
2344  case 2:
2345  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2346  (pict->pict_type!= AV_PICTURE_TYPE_B))
2347  continue;
2348  direction = 1;
2349  break;
2350  }
2351  if (!USES_LIST(mbtype_table[mb_index], direction))
2352  continue;
2353 
2354  if (IS_8X8(mbtype_table[mb_index])) {
2355  int i;
2356  for (i = 0; i < 4; i++) {
2357  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2358  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2359  int xy = (mb_x * 2 + (i & 1) +
2360  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2361  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2362  int my = (motion_val[direction][xy][1] >> shift) + sy;
2363  draw_arrow(ptr, sx, sy, mx, my, width,
2364  height, pict->linesize[0], 100, 0, direction);
2365  }
2366  } else if (IS_16X8(mbtype_table[mb_index])) {
2367  int i;
2368  for (i = 0; i < 2; i++) {
2369  int sx = mb_x * 16 + 8;
2370  int sy = mb_y * 16 + 4 + 8 * i;
2371  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2372  int mx = (motion_val[direction][xy][0] >> shift);
2373  int my = (motion_val[direction][xy][1] >> shift);
2374 
2375  if (IS_INTERLACED(mbtype_table[mb_index]))
2376  my *= 2;
2377 
2378  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2379  height, pict->linesize[0], 100, 0, direction);
2380  }
2381  } else if (IS_8X16(mbtype_table[mb_index])) {
2382  int i;
2383  for (i = 0; i < 2; i++) {
2384  int sx = mb_x * 16 + 4 + 8 * i;
2385  int sy = mb_y * 16 + 8;
2386  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2387  int mx = motion_val[direction][xy][0] >> shift;
2388  int my = motion_val[direction][xy][1] >> shift;
2389 
2390  if (IS_INTERLACED(mbtype_table[mb_index]))
2391  my *= 2;
2392 
2393  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2394  height, pict->linesize[0], 100, 0, direction);
2395  }
2396  } else {
2397  int sx= mb_x * 16 + 8;
2398  int sy= mb_y * 16 + 8;
2399  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2400  int mx= (motion_val[direction][xy][0]>>shift) + sx;
2401  int my= (motion_val[direction][xy][1]>>shift) + sy;
2402  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2403  }
2404  }
2405  }
2406 #endif
2407  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2408  uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2409  0x0101010101010101ULL;
2410  int y;
2411  for (y = 0; y < block_height; y++) {
2412  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2413  (block_height * mb_y + y) *
2414  pict->linesize[1]) = c;
2415  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2416  (block_height * mb_y + y) *
2417  pict->linesize[2]) = c;
2418  }
2419  }
2420  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2421  motion_val[0]) {
2422  int mb_type = mbtype_table[mb_index];
2423  uint64_t u,v;
2424  int y;
2425 #define COLOR(theta, r) \
2426  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2427  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2428 
2429 
2430  u = v = 128;
2431  if (IS_PCM(mb_type)) {
2432  COLOR(120, 48)
2433  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2434  IS_INTRA16x16(mb_type)) {
2435  COLOR(30, 48)
2436  } else if (IS_INTRA4x4(mb_type)) {
2437  COLOR(90, 48)
2438  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2439  // COLOR(120, 48)
2440  } else if (IS_DIRECT(mb_type)) {
2441  COLOR(150, 48)
2442  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2443  COLOR(170, 48)
2444  } else if (IS_GMC(mb_type)) {
2445  COLOR(190, 48)
2446  } else if (IS_SKIP(mb_type)) {
2447  // COLOR(180, 48)
2448  } else if (!USES_LIST(mb_type, 1)) {
2449  COLOR(240, 48)
2450  } else if (!USES_LIST(mb_type, 0)) {
2451  COLOR(0, 48)
2452  } else {
2453  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2454  COLOR(300,48)
2455  }
2456 
2457  u *= 0x0101010101010101ULL;
2458  v *= 0x0101010101010101ULL;
2459  for (y = 0; y < block_height; y++) {
2460  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2461  (block_height * mb_y + y) * pict->linesize[1]) = u;
2462  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2463  (block_height * mb_y + y) * pict->linesize[2]) = v;
2464  }
2465 
2466  // segmentation
2467  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2468  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2469  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2470  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2471  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2472  }
2473  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2474  for (y = 0; y < 16; y++)
2475  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2476  pict->linesize[0]] ^= 0x80;
2477  }
2478  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2479  int dm = 1 << (mv_sample_log2 - 2);
2480  for (i = 0; i < 4; i++) {
2481  int sx = mb_x * 16 + 8 * (i & 1);
2482  int sy = mb_y * 16 + 8 * (i >> 1);
2483  int xy = (mb_x * 2 + (i & 1) +
2484  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2485  // FIXME bidir
2486  int32_t *mv = (int32_t *) &motion_val[0][xy];
2487  if (mv[0] != mv[dm] ||
2488  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2489  for (y = 0; y < 8; y++)
2490  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2491  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2492  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2493  pict->linesize[0]) ^= 0x8080808080808080ULL;
2494  }
2495  }
2496 
2497  if (IS_INTERLACED(mb_type) &&
2498  avctx->codec->id == AV_CODEC_ID_H264) {
2499  // hmm
2500  }
2501  }
2502  mbskip_table[mb_index] = 0;
2503  }
2504  }
2505  }
2506 }
2507 
2509 {
2511  p->qscale_table, p->motion_val, &s->low_delay,
2512  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2513 }
2514 
2516 {
2518  int offset = 2*s->mb_stride + 1;
2519  if(!ref)
2520  return AVERROR(ENOMEM);
2521  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2522  ref->size -= offset;
2523  ref->data += offset;
2524  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2525 }
2526 
2528  uint8_t *dest, uint8_t *src,
2529  int field_based, int field_select,
2530  int src_x, int src_y,
2531  int width, int height, ptrdiff_t stride,
2532  int h_edge_pos, int v_edge_pos,
2533  int w, int h, h264_chroma_mc_func *pix_op,
2534  int motion_x, int motion_y)
2535 {
2536  const int lowres = s->avctx->lowres;
2537  const int op_index = FFMIN(lowres, 3);
2538  const int s_mask = (2 << lowres) - 1;
2539  int emu = 0;
2540  int sx, sy;
2541 
2542  if (s->quarter_sample) {
2543  motion_x /= 2;
2544  motion_y /= 2;
2545  }
2546 
2547  sx = motion_x & s_mask;
2548  sy = motion_y & s_mask;
2549  src_x += motion_x >> lowres + 1;
2550  src_y += motion_y >> lowres + 1;
2551 
2552  src += src_y * stride + src_x;
2553 
2554  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2555  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2557  s->linesize, s->linesize,
2558  w + 1, (h + 1) << field_based,
2559  src_x, src_y << field_based,
2560  h_edge_pos, v_edge_pos);
2561  src = s->edge_emu_buffer;
2562  emu = 1;
2563  }
2564 
2565  sx = (sx << 2) >> lowres;
2566  sy = (sy << 2) >> lowres;
2567  if (field_select)
2568  src += s->linesize;
2569  pix_op[op_index](dest, src, stride, h, sx, sy);
2570  return emu;
2571 }
2572 
2573 /* apply one mpeg motion vector to the three components */
2575  uint8_t *dest_y,
2576  uint8_t *dest_cb,
2577  uint8_t *dest_cr,
2578  int field_based,
2579  int bottom_field,
2580  int field_select,
2581  uint8_t **ref_picture,
2582  h264_chroma_mc_func *pix_op,
2583  int motion_x, int motion_y,
2584  int h, int mb_y)
2585 {
2586  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2587  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2588  ptrdiff_t uvlinesize, linesize;
2589  const int lowres = s->avctx->lowres;
2590  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2591  const int block_s = 8>>lowres;
2592  const int s_mask = (2 << lowres) - 1;
2593  const int h_edge_pos = s->h_edge_pos >> lowres;
2594  const int v_edge_pos = s->v_edge_pos >> lowres;
2595  linesize = s->current_picture.f->linesize[0] << field_based;
2596  uvlinesize = s->current_picture.f->linesize[1] << field_based;
2597 
2598  // FIXME obviously not perfect but qpel will not work in lowres anyway
2599  if (s->quarter_sample) {
2600  motion_x /= 2;
2601  motion_y /= 2;
2602  }
2603 
2604  if(field_based){
2605  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2606  }
2607 
2608  sx = motion_x & s_mask;
2609  sy = motion_y & s_mask;
2610  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2611  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2612 
2613  if (s->out_format == FMT_H263) {
2614  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2615  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2616  uvsrc_x = src_x >> 1;
2617  uvsrc_y = src_y >> 1;
2618  } else if (s->out_format == FMT_H261) {
2619  // even chroma mv's are full pel in H261
2620  mx = motion_x / 4;
2621  my = motion_y / 4;
2622  uvsx = (2 * mx) & s_mask;
2623  uvsy = (2 * my) & s_mask;
2624  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2625  uvsrc_y = mb_y * block_s + (my >> lowres);
2626  } else {
2627  if(s->chroma_y_shift){
2628  mx = motion_x / 2;
2629  my = motion_y / 2;
2630  uvsx = mx & s_mask;
2631  uvsy = my & s_mask;
2632  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2633  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2634  } else {
2635  if(s->chroma_x_shift){
2636  //Chroma422
2637  mx = motion_x / 2;
2638  uvsx = mx & s_mask;
2639  uvsy = motion_y & s_mask;
2640  uvsrc_y = src_y;
2641  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2642  } else {
2643  //Chroma444
2644  uvsx = motion_x & s_mask;
2645  uvsy = motion_y & s_mask;
2646  uvsrc_x = src_x;
2647  uvsrc_y = src_y;
2648  }
2649  }
2650  }
2651 
2652  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2653  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2654  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2655 
2656  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2657  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2658  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2659  linesize >> field_based, linesize >> field_based,
2660  17, 17 + field_based,
2661  src_x, src_y << field_based, h_edge_pos,
2662  v_edge_pos);
2663  ptr_y = s->edge_emu_buffer;
2664  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2665  uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2666  uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2667  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2668  uvlinesize >> field_based, uvlinesize >> field_based,
2669  9, 9 + field_based,
2670  uvsrc_x, uvsrc_y << field_based,
2671  h_edge_pos >> 1, v_edge_pos >> 1);
2672  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2673  uvlinesize >> field_based,uvlinesize >> field_based,
2674  9, 9 + field_based,
2675  uvsrc_x, uvsrc_y << field_based,
2676  h_edge_pos >> 1, v_edge_pos >> 1);
2677  ptr_cb = ubuf;
2678  ptr_cr = vbuf;
2679  }
2680  }
2681 
2682  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
2683  if (bottom_field) {
2684  dest_y += s->linesize;
2685  dest_cb += s->uvlinesize;
2686  dest_cr += s->uvlinesize;
2687  }
2688 
2689  if (field_select) {
2690  ptr_y += s->linesize;
2691  ptr_cb += s->uvlinesize;
2692  ptr_cr += s->uvlinesize;
2693  }
2694 
2695  sx = (sx << 2) >> lowres;
2696  sy = (sy << 2) >> lowres;
2697  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2698 
2699  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2700  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2701  uvsx = (uvsx << 2) >> lowres;
2702  uvsy = (uvsy << 2) >> lowres;
2703  if (hc) {
2704  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2705  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2706  }
2707  }
2708  // FIXME h261 lowres loop filter
2709 }
2710 
2712  uint8_t *dest_cb, uint8_t *dest_cr,
2713  uint8_t **ref_picture,
2714  h264_chroma_mc_func * pix_op,
2715  int mx, int my)
2716 {
2717  const int lowres = s->avctx->lowres;
2718  const int op_index = FFMIN(lowres, 3);
2719  const int block_s = 8 >> lowres;
2720  const int s_mask = (2 << lowres) - 1;
2721  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2722  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2723  int emu = 0, src_x, src_y, sx, sy;
2724  ptrdiff_t offset;
2725  uint8_t *ptr;
2726 
2727  if (s->quarter_sample) {
2728  mx /= 2;
2729  my /= 2;
2730  }
2731 
2732  /* In case of 8X8, we construct a single chroma motion vector
2733  with a special rounding */
2734  mx = ff_h263_round_chroma(mx);
2735  my = ff_h263_round_chroma(my);
2736 
2737  sx = mx & s_mask;
2738  sy = my & s_mask;
2739  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2740  src_y = s->mb_y * block_s + (my >> lowres + 1);
2741 
2742  offset = src_y * s->uvlinesize + src_x;
2743  ptr = ref_picture[1] + offset;
2744  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2745  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2747  s->uvlinesize, s->uvlinesize,
2748  9, 9,
2749  src_x, src_y, h_edge_pos, v_edge_pos);
2750  ptr = s->edge_emu_buffer;
2751  emu = 1;
2752  }
2753  sx = (sx << 2) >> lowres;
2754  sy = (sy << 2) >> lowres;
2755  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2756 
2757  ptr = ref_picture[2] + offset;
2758  if (emu) {
2760  s->uvlinesize, s->uvlinesize,
2761  9, 9,
2762  src_x, src_y, h_edge_pos, v_edge_pos);
2763  ptr = s->edge_emu_buffer;
2764  }
2765  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2766 }
2767 
2768 /**
2769  * motion compensation of a single macroblock
2770  * @param s context
2771  * @param dest_y luma destination pointer
2772  * @param dest_cb chroma cb/u destination pointer
2773  * @param dest_cr chroma cr/v destination pointer
2774  * @param dir direction (0->forward, 1->backward)
2775  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2776  * @param pix_op halfpel motion compensation function (average or put normally)
2777  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2778  */
2779 static inline void MPV_motion_lowres(MpegEncContext *s,
2780  uint8_t *dest_y, uint8_t *dest_cb,
2781  uint8_t *dest_cr,
2782  int dir, uint8_t **ref_picture,
2783  h264_chroma_mc_func *pix_op)
2784 {
 /* Dispatch on s->mv_type and apply the MVs in s->mv[dir][] at reduced
  * ("lowres") resolution; block_s is the per-dimension block size after
  * the lowres shift (8 >> lowres). */
2785  int mx, my;
2786  int mb_x, mb_y, i;
2787  const int lowres = s->avctx->lowres;
2788  const int block_s = 8 >>lowres;
2789 
2790  mb_x = s->mb_x;
2791  mb_y = s->mb_y;
2792 
2793  switch (s->mv_type) {
 /* One MV for the whole 16x16 macroblock. */
2794  case MV_TYPE_16X16:
2795  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2796  0, 0, 0,
2797  ref_picture, pix_op,
2798  s->mv[dir][0][0], s->mv[dir][0][1],
2799  2 * block_s, mb_y);
2800  break;
 /* Four MVs, one per 8x8 luma block; chroma uses a single averaged MV
  * (mx/my accumulate the four luma vectors for chroma_4mv_motion_lowres). */
2801  case MV_TYPE_8X8:
2802  mx = 0;
2803  my = 0;
2804  for (i = 0; i < 4; i++) {
2805  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2806  s->linesize) * block_s,
2807  ref_picture[0], 0, 0,
2808  (2 * mb_x + (i & 1)) * block_s,
2809  (2 * mb_y + (i >> 1)) * block_s,
2810  s->width, s->height, s->linesize,
2811  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2812  block_s, block_s, pix_op,
2813  s->mv[dir][i][0], s->mv[dir][i][1]);
2814 
2815  mx += s->mv[dir][i][0];
2816  my += s->mv[dir][i][1];
2817  }
2818 
2819  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2820  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2821  pix_op, mx, my);
2822  break;
 /* Field prediction: in frame pictures each field is predicted
  * separately; in field pictures a single field MV is used. */
2823  case MV_TYPE_FIELD:
2824  if (s->picture_structure == PICT_FRAME) {
2825  /* top field */
2826  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2827  1, 0, s->field_select[dir][0],
2828  ref_picture, pix_op,
2829  s->mv[dir][0][0], s->mv[dir][0][1],
2830  block_s, mb_y);
2831  /* bottom field */
2832  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2833  1, 1, s->field_select[dir][1],
2834  ref_picture, pix_op,
2835  s->mv[dir][1][0], s->mv[dir][1][1],
2836  block_s, mb_y);
2837  } else {
 /* If the selected field has the same parity as the current field of a
  * non-B picture (and this is the second field), the reference is the
  * picture currently being decoded. */
2838  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2839  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2840  ref_picture = s->current_picture_ptr->f->data;
2841 
2842  }
2843  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2844  0, 0, s->field_select[dir][0],
2845  ref_picture, pix_op,
2846  s->mv[dir][0][0],
2847  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2848  }
2849  break;
 /* 16x8 prediction (field pictures): two MVs, upper and lower half. */
2850  case MV_TYPE_16X8:
2851  for (i = 0; i < 2; i++) {
2852  uint8_t **ref2picture;
2853 
 /* Same-parity reference in the second field of a non-B picture
  * means predicting from the picture being decoded. */
2854  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2855  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2856  ref2picture = ref_picture;
2857  } else {
2858  ref2picture = s->current_picture_ptr->f->data;
2859  }
2860 
2861  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2862  0, 0, s->field_select[dir][i],
2863  ref2picture, pix_op,
2864  s->mv[dir][i][0], s->mv[dir][i][1] +
2865  2 * block_s * i, block_s, mb_y >> 1);
2866 
 /* Advance destinations to the lower half for the second iteration. */
2867  dest_y += 2 * block_s * s->linesize;
2868  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2869  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2870  }
2871  break;
 /* Dual-prime: predictions from both field parities are averaged. */
2872  case MV_TYPE_DMV:
2873  if (s->picture_structure == PICT_FRAME) {
2874  for (i = 0; i < 2; i++) {
2875  int j;
2876  for (j = 0; j < 2; j++) {
2877  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2878  1, j, j ^ i,
2879  ref_picture, pix_op,
2880  s->mv[dir][2 * i + j][0],
2881  s->mv[dir][2 * i + j][1],
2882  block_s, mb_y);
2883  }
 /* NOTE(review): source line 2884 is absent from this listing
  * (dropped during extraction) — verify against upstream; the
  * second pass presumably switches pix_op to an averaging
  * variant. TODO confirm. */
2885  }
2886  } else {
2887  for (i = 0; i < 2; i++) {
2888  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2889  0, 0, s->picture_structure != i + 1,
2890  ref_picture, pix_op,
2891  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2892  2 * block_s, mb_y >> 1);
2893 
2894  // after put we make avg of the same block
 /* NOTE(review): source line 2895 is absent from this listing
  * (dropped during extraction) — verify against upstream. */
2896 
2897  // opposite parity is always in the same
2898  // frame if this is second field
2899  if (!s->first_field) {
2900  ref_picture = s->current_picture_ptr->f->data;
2901  }
2902  }
2903  }
2904  break;
2905  default:
 /* mv_type values other than the cases above are a decoder bug. */
2906  av_assert2(0);
2907  }
2908 }
2909 
2910 /**
2911  * find the lowest MB row referenced in the MVs
2912  */
2914 {
2915  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2916  int my, off, i, mvs;
2917 
2918  if (s->picture_structure != PICT_FRAME || s->mcsel)
2919  goto unhandled;
2920 
2921  switch (s->mv_type) {
2922  case MV_TYPE_16X16:
2923  mvs = 1;
2924  break;
2925  case MV_TYPE_16X8:
2926  mvs = 2;
2927  break;
2928  case MV_TYPE_8X8:
2929  mvs = 4;
2930  break;
2931  default:
2932  goto unhandled;
2933  }
2934 
2935  for (i = 0; i < mvs; i++) {
2936  my = s->mv[dir][i][1]<<qpel_shift;
2937  my_max = FFMAX(my_max, my);
2938  my_min = FFMIN(my_min, my);
2939  }
2940 
2941  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2942 
2943  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2944 unhandled:
2945  return s->mb_height-1;
2946 }
2947 
2948 /* put block[] to dest[] */
2949 static inline void put_dct(MpegEncContext *s,
2950  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2951 {
2952  s->dct_unquantize_intra(s, block, i, qscale);
2953  s->idsp.idct_put(dest, line_size, block);
2954 }
2955 
2956 /* add block[] to dest[] */
2957 static inline void add_dct(MpegEncContext *s,
2958  int16_t *block, int i, uint8_t *dest, int line_size)
2959 {
2960  if (s->block_last_index[i] >= 0) {
2961  s->idsp.idct_add(dest, line_size, block);
2962  }
2963 }
2964 
2965 static inline void add_dequant_dct(MpegEncContext *s,
2966  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2967 {
2968  if (s->block_last_index[i] >= 0) {
2969  s->dct_unquantize_inter(s, block, i, qscale);
2970 
2971  s->idsp.idct_add(dest, line_size, block);
2972  }
2973 }
2974 
2975 /**
2976  * Clean dc, ac, coded_block for the current non-intra MB.
2977  */
2979 {
2980  int wrap = s->b8_stride;
2981  int xy = s->block_index[0];
2982 
2983  s->dc_val[0][xy ] =
2984  s->dc_val[0][xy + 1 ] =
2985  s->dc_val[0][xy + wrap] =
2986  s->dc_val[0][xy + 1 + wrap] = 1024;
2987  /* ac pred */
2988  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2989  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2990  if (s->msmpeg4_version>=3) {
2991  s->coded_block[xy ] =
2992  s->coded_block[xy + 1 ] =
2993  s->coded_block[xy + wrap] =
2994  s->coded_block[xy + 1 + wrap] = 0;
2995  }
2996  /* chroma */
2997  wrap = s->mb_stride;
2998  xy = s->mb_x + s->mb_y * wrap;
2999  s->dc_val[1][xy] =
3000  s->dc_val[2][xy] = 1024;
3001  /* ac pred */
3002  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3003  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3004 
3005  s->mbintra_table[xy]= 0;
3006 }
3007 
3008 /* generic function called after a macroblock has been parsed by the
3009  decoder or after it has been encoded by the encoder.
3010 
3011  Important variables used:
3012  s->mb_intra : true if intra macroblock
3013  s->mv_dir : motion vector direction
3014  s->mv_type : motion vector type
3015  s->mv : motion vector
3016  s->interlaced_dct : true if interlaced dct used (mpeg2)
3017  */
3018 static av_always_inline
3020  int lowres_flag, int is_mpeg12)
3021 {
3022  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
3023 
3024  if (CONFIG_XVMC &&
3025  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3026  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3027  return;
3028  }
3029 
3030  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3031  /* print DCT coefficients */
3032  int i,j;
3033  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3034  for(i=0; i<6; i++){
3035  for(j=0; j<64; j++){
3036  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3037  block[i][s->idsp.idct_permutation[j]]);
3038  }
3039  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3040  }
3041  }
3042 
3043  s->current_picture.qscale_table[mb_xy] = s->qscale;
3044 
3045  /* update DC predictors for P macroblocks */
3046  if (!s->mb_intra) {
3047  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3048  if(s->mbintra_table[mb_xy])
3050  } else {
3051  s->last_dc[0] =
3052  s->last_dc[1] =
3053  s->last_dc[2] = 128 << s->intra_dc_precision;
3054  }
3055  }
3056  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3057  s->mbintra_table[mb_xy]=1;
3058 
3059  if ( (s->flags&CODEC_FLAG_PSNR)
3061  || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3062  uint8_t *dest_y, *dest_cb, *dest_cr;
3063  int dct_linesize, dct_offset;
3064  op_pixels_func (*op_pix)[4];
3065  qpel_mc_func (*op_qpix)[16];
3066  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3067  const int uvlinesize = s->current_picture.f->linesize[1];
3068  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3069  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3070 
3071  /* avoid copy if macroblock skipped in last frame too */
3072  /* skip only during decoding as we might trash the buffers during encoding a bit */
3073  if(!s->encoding){
3074  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3075 
3076  if (s->mb_skipped) {
3077  s->mb_skipped= 0;
3079  *mbskip_ptr = 1;
3080  } else if(!s->current_picture.reference) {
3081  *mbskip_ptr = 1;
3082  } else{
3083  *mbskip_ptr = 0; /* not skipped */
3084  }
3085  }
3086 
3087  dct_linesize = linesize << s->interlaced_dct;
3088  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3089 
3090  if(readable){
3091  dest_y= s->dest[0];
3092  dest_cb= s->dest[1];
3093  dest_cr= s->dest[2];
3094  }else{
3095  dest_y = s->b_scratchpad;
3096  dest_cb= s->b_scratchpad+16*linesize;
3097  dest_cr= s->b_scratchpad+32*linesize;
3098  }
3099 
3100  if (!s->mb_intra) {
3101  /* motion handling */
3102  /* decoding or more than one mb_type (MC was already done otherwise) */
3103  if(!s->encoding){
3104 
3105  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
3106  if (s->mv_dir & MV_DIR_FORWARD) {
3109  0);
3110  }
3111  if (s->mv_dir & MV_DIR_BACKWARD) {
3114  0);
3115  }
3116  }
3117 
3118  if(lowres_flag){
3120 
3121  if (s->mv_dir & MV_DIR_FORWARD) {
3122  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3124  }
3125  if (s->mv_dir & MV_DIR_BACKWARD) {
3126  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3127  }
3128  }else{
3129  op_qpix = s->me.qpel_put;
3130  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3131  op_pix = s->hdsp.put_pixels_tab;
3132  }else{
3133  op_pix = s->hdsp.put_no_rnd_pixels_tab;
3134  }
3135  if (s->mv_dir & MV_DIR_FORWARD) {
3136  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3137  op_pix = s->hdsp.avg_pixels_tab;
3138  op_qpix= s->me.qpel_avg;
3139  }
3140  if (s->mv_dir & MV_DIR_BACKWARD) {
3141  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3142  }
3143  }
3144  }
3145 
3146  /* skip dequant / idct if we are really late ;) */
3147  if(s->avctx->skip_idct){
3150  || s->avctx->skip_idct >= AVDISCARD_ALL)
3151  goto skip_idct;
3152  }
3153 
3154  /* add dct residue */
3156  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3157  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3158  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3159  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3160  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3161 
3162  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3163  if (s->chroma_y_shift){
3164  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3165  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3166  }else{
3167  dct_linesize >>= 1;
3168  dct_offset >>=1;
3169  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3170  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3171  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3172  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3173  }
3174  }
3175  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3176  add_dct(s, block[0], 0, dest_y , dct_linesize);
3177  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3178  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3179  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3180 
3181  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3182  if(s->chroma_y_shift){//Chroma420
3183  add_dct(s, block[4], 4, dest_cb, uvlinesize);
3184  add_dct(s, block[5], 5, dest_cr, uvlinesize);
3185  }else{
3186  //chroma422
3187  dct_linesize = uvlinesize << s->interlaced_dct;
3188  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3189 
3190  add_dct(s, block[4], 4, dest_cb, dct_linesize);
3191  add_dct(s, block[5], 5, dest_cr, dct_linesize);
3192  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3193  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3194  if(!s->chroma_x_shift){//Chroma444
3195  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3196  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3197  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3198  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3199  }
3200  }
3201  }//fi gray
3202  }
3203  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3204  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3205  }
3206  } else {
3207  /* dct only in intra block */
3209  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3210  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3211  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3212  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3213 
3214  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3215  if(s->chroma_y_shift){
3216  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3217  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3218  }else{
3219  dct_offset >>=1;
3220  dct_linesize >>=1;
3221  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3222  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3223  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3224  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3225  }
3226  }
3227  }else{
3228  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3229  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3230  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3231  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3232 
3233  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3234  if(s->chroma_y_shift){
3235  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3236  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3237  }else{
3238 
3239  dct_linesize = uvlinesize << s->interlaced_dct;
3240  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3241 
3242  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3243  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3244  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3245  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3246  if(!s->chroma_x_shift){//Chroma444
3247  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3248  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3249  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3250  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
3251  }
3252  }
3253  }//gray
3254  }
3255  }
3256 skip_idct:
3257  if(!readable){
3258  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3259  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3260  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
3261  }
3262  }
3263 }
3264 
3265 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3266 {
3267 #if !CONFIG_SMALL
3268  if(s->out_format == FMT_MPEG1) {
3269  if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3270  else mpv_decode_mb_internal(s, block, 0, 1);
3271  } else
3272 #endif
3273  if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3274  else mpv_decode_mb_internal(s, block, 0, 0);
3275 }
3276 
/* NOTE(review): incomplete in this extraction — the function signature
 * (original file line 3277) and the opening of the call on line 3279 are
 * missing.  The visible lines pass the last picture (or NULL when there is
 * none), the band position y / height h, and the picture-structure /
 * first-field / low-delay state to a draw-horiz-band helper — confirm
 * against the original file. */
3278 {
3280  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3281  s->first_field, s->low_delay);
3282 }
3283 
/**
 * Set up s->block_index[] and the s->dest[] plane pointers for the
 * macroblock at (s->mb_x, s->mb_y).  Intended to be called once per
 * macroblock before it is processed.
 */
3284 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3285  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3286  const int uvlinesize = s->current_picture.f->linesize[1];
3287  const int mb_size= 4 - s->avctx->lowres; // log2 of the MB size in pixels (16 >> lowres)
3288 
     /* Indices of the four 8x8 luma blocks of this MB in the b8-grid
      * (two per row, hence the *2 and the -2/-1 column offsets). */
3289  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3290  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3291  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3292  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
     /* Chroma block indices live past the luma area of the index array. */
3293  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3294  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3295  //block_index is not used by mpeg2, so it is not affected by chroma_format
3296 
     /* Destination pointers for the current MB column; mb_x - 1 because
      * callers advance the pointers before use of the current column. */
3297  s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3298  s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3299  s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3300 
     /* NOTE(review): the guarding condition of this brace (original file
      * line 3301) was dropped by the extraction — confirm it against the
      * original file before relying on this listing. */
3302  {
3303  if(s->picture_structure==PICT_FRAME){
         /* Frame picture: advance by full MB rows. */
3304  s->dest[0] += s->mb_y * linesize << mb_size;
3305  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3306  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3307  }else{
         /* Field picture: rows are interleaved, so use half the MB row. */
3308  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3309  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3310  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3312  }
3313  }
3314 }
3315 
/**
 * Permute an 8x8 block of coefficients in place.
 *
 * @param block       the block to permute according to the permutation vector
 * @param permutation maps each coefficient position to its permuted position
 * @param scantable   the scantable in use; only used to speed the permutation
 *                    up, the block is not (inverse) permutated to scan order
 * @param last        last non-zero coefficient in scantable order; positions
 *                    beyond it are known to be zero and are left untouched
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Lift the non-zero coefficients out of the block, clearing them so the
     * second pass can scatter into a clean destination. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* Scatter each saved coefficient to its permuted position. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
3344 
/* NOTE(review): incomplete in this extraction — the function signature
 * (original file line 3345, presumably the flush entry point taking an
 * AVCodecContext*) and several statements (lines 3354, 3356-3358, 3364,
 * 3366) were dropped; confirm against the original file.  The visible
 * body unreferences all pictures and resets the parse/position state. */
3346  int i;
3347  MpegEncContext *s = avctx->priv_data;
3348 
     /* Nothing to flush before the context/picture array exists. */
3349  if (!s || !s->picture)
3350  return;
3351 
     /* Drop the reference held on every picture slot. */
3352  for (i = 0; i < MAX_PICTURE_COUNT; i++)
3353  ff_mpeg_unref_picture(s, &s->picture[i]);
3355 
3359 
     /* Reset the decoding position and bitstream/parse bookkeeping. */
3360  s->mb_x= s->mb_y= 0;
3361  s->closed_gop= 0;
3362 
3363  s->parse_context.state= -1;
3365  s->parse_context.overread= 0;
3367  s->parse_context.index= 0;
3368  s->parse_context.last_index= 0;
3369  s->bitstream_buffer_size=0;
3370  s->pp_time=0;
3371 }
3372 
3373 /**
3374  * set qscale and update qscale dependent variables.
3375  */
3376 void ff_set_qscale(MpegEncContext * s, int qscale)
3377 {
     /* Clamp to the legal quantizer range [1, 31]. */
3378  if (qscale < 1)
3379  qscale = 1;
3380  else if (qscale > 31)
3381  qscale = 31;
3382 
3383  s->qscale = qscale;
     /* Chroma quantizer is looked up from the (possibly non-identity)
      * per-codec chroma qscale table. */
3384  s->chroma_qscale= s->chroma_qscale_table[qscale];
3385 
     /* DC scale factors depend on the chosen quantizer. */
3386  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
     /* NOTE(review): original file line 3387 (presumably the matching
      * s->c_dc_scale update) was dropped by the extraction — confirm
      * against the original file. */
3388 }
3389 
3391 {
3394 }