FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "h264chroma.h"
38 #include "idctdsp.h"
39 #include "internal.h"
40 #include "mathops.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "qpeldsp.h"
46 #include "thread.h"
47 #include <limits.h>
48 
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
53 };
54 
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 };
66 
/* All-4 DC scale table (128 entries, presumably indexed by qscale).
 * NOTE(review): one of the four entries of ff_mpeg2_dc_scale_table declared
 * below; the sibling tables hold 8/4/2/1, which looks like the MPEG-2
 * intra_dc_precision divisors — TODO confirm against the MPEG-2 decoder. */
 67 static const uint8_t mpeg2_dc_scale_table1[128] = {
 68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 69  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 70  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 71  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 72  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 73  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 74  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 75  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 76  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 77 };
78 
/* All-2 DC scale table (128 entries); companion of mpeg2_dc_scale_table1.
 * NOTE(review): presumably the intra_dc_precision == 2 entry of
 * ff_mpeg2_dc_scale_table — TODO confirm at the lookup site. */
 79 static const uint8_t mpeg2_dc_scale_table2[128] = {
 80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 81  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 82  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 83  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 84  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 85  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 86  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 87  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 88  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 89 };
90 
/* All-1 DC scale table (128 entries); DC coefficients are left unscaled.
 * NOTE(review): presumably the intra_dc_precision == 3 (11-bit) entry of
 * ff_mpeg2_dc_scale_table — TODO confirm at the lookup site. */
 91 static const uint8_t mpeg2_dc_scale_table3[128] = {
 92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 93  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 94  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 95  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 96  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 97  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 98  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 99  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 100  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 101 };
102 
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
108 };
109 
111  0, 1, 2, 3, 8, 9, 16, 17,
112  10, 11, 4, 5, 6, 7, 15, 14,
113  13, 12, 19, 18, 24, 25, 32, 33,
114  26, 27, 20, 21, 22, 23, 28, 29,
115  30, 31, 34, 35, 40, 41, 48, 49,
116  42, 43, 36, 37, 38, 39, 44, 45,
117  46, 47, 50, 51, 56, 57, 58, 59,
118  52, 53, 54, 55, 60, 61, 62, 63,
119 };
120 
122  0, 8, 16, 24, 1, 9, 2, 10,
123  17, 25, 32, 40, 48, 56, 57, 49,
124  41, 33, 26, 18, 3, 11, 4, 12,
125  19, 27, 34, 42, 50, 58, 35, 43,
126  51, 59, 20, 28, 5, 13, 6, 14,
127  21, 29, 36, 44, 52, 60, 37, 45,
128  53, 61, 22, 30, 7, 15, 23, 31,
129  38, 46, 54, 62, 39, 47, 55, 63,
130 };
131 
/* MPEG-1 intra-block inverse quantisation (C reference version).
 * NOTE(review): the opening signature line is elided in this extract; the
 * visible tail shows parameters (..., int16_t *block, int n, int qscale).
 * block[0] (DC) is scaled by the luma/chroma DC scale; AC coefficients
 * 1..block_last_index[n] are dequantised through the intra matrix in
 * scan order, with the result forced odd ("oddification") per MPEG-1. */
 133  int16_t *block, int n, int qscale)
 134 {
 135  int i, level, nCoeffs;
 136  const uint16_t *quant_matrix;
 137 
 138  nCoeffs= s->block_last_index[n];
 139 
 140  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale; /* n < 4 are the luma blocks */
 141  /* XXX: only mpeg1 */
 142  quant_matrix = s->intra_matrix;
 143  for(i=1;i<=nCoeffs;i++) {
 144  int j= s->intra_scantable.permutated[i];
 145  level = block[j];
 146  if (level) {
 147  if (level < 0) {
 148  level = -level;
 149  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 150  level = (level - 1) | 1; /* force result odd (MPEG-1 mismatch control) */
 151  level = -level;
 152  } else {
 153  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 154  level = (level - 1) | 1;
 155  }
 156  block[j] = level;
 157  }
 158  }
 159 }
160 
/* MPEG-1 inter-block inverse quantisation (C reference version).
 * NOTE(review): signature line elided in this extract; parameters match the
 * intra variant above. Inter blocks have no special DC path: every
 * coefficient 0..block_last_index[n] goes through (2*level + 1) * qscale *
 * matrix >> 4 with the result forced odd, as in the MPEG-1 spec. */
 162  int16_t *block, int n, int qscale)
 163 {
 164  int i, level, nCoeffs;
 165  const uint16_t *quant_matrix;
 166 
 167  nCoeffs= s->block_last_index[n];
 168 
 169  quant_matrix = s->inter_matrix;
 170  for(i=0; i<=nCoeffs; i++) {
 171  int j= s->intra_scantable.permutated[i];
 172  level = block[j];
 173  if (level) {
 174  if (level < 0) {
 175  level = -level;
 176  level = (((level << 1) + 1) * qscale *
 177  ((int) (quant_matrix[j]))) >> 4;
 178  level = (level - 1) | 1; /* force result odd (MPEG-1 mismatch control) */
 179  level = -level;
 180  } else {
 181  level = (((level << 1) + 1) * qscale *
 182  ((int) (quant_matrix[j]))) >> 4;
 183  level = (level - 1) | 1;
 184  }
 185  block[j] = level;
 186  }
 187  }
 188 }
189 
/* MPEG-2 intra-block inverse quantisation (C reference version).
 * NOTE(review): signature line elided in this extract. Unlike the MPEG-1
 * variant this does NOT force levels odd; with alternate_scan all 63 AC
 * coefficients are processed regardless of block_last_index. */
 191  int16_t *block, int n, int qscale)
 192 {
 193  int i, level, nCoeffs;
 194  const uint16_t *quant_matrix;
 195 
 196  if(s->alternate_scan) nCoeffs= 63;
 197  else nCoeffs= s->block_last_index[n];
 198 
 199  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale; /* n < 4 are the luma blocks */
 200  quant_matrix = s->intra_matrix;
 201  for(i=1;i<=nCoeffs;i++) {
 202  int j= s->intra_scantable.permutated[i];
 203  level = block[j];
 204  if (level) {
 205  if (level < 0) {
 206  level = -level;
 207  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 208  level = -level;
 209  } else {
 210  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 211  }
 212  block[j] = level;
 213  }
 214  }
 215 }
216 
/* MPEG-2 intra inverse quantisation, bit-exact variant.
 * NOTE(review): signature line elided in this extract. Same arithmetic as
 * the plain MPEG-2 intra version, but additionally accumulates the sum of
 * all output levels and toggles the LSB of block[63] when the sum is even
 * — MPEG-2 mismatch control, which requires the total to be odd. */
 218  int16_t *block, int n, int qscale)
 219 {
 220  int i, level, nCoeffs;
 221  const uint16_t *quant_matrix;
 222  int sum=-1; /* start at -1 so block[63]^=sum&1 flips the bit when the coeff sum is even */
 223 
 224  if(s->alternate_scan) nCoeffs= 63;
 225  else nCoeffs= s->block_last_index[n];
 226 
 227  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
 228  sum += block[0];
 229  quant_matrix = s->intra_matrix;
 230  for(i=1;i<=nCoeffs;i++) {
 231  int j= s->intra_scantable.permutated[i];
 232  level = block[j];
 233  if (level) {
 234  if (level < 0) {
 235  level = -level;
 236  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 237  level = -level;
 238  } else {
 239  level = (int)(level * qscale * quant_matrix[j]) >> 3;
 240  }
 241  block[j] = level;
 242  sum+=level;
 243  }
 244  }
 245  block[63]^=sum&1; /* mismatch control: make the sum of coefficients odd */
 246 }
247 
/* MPEG-2 inter-block inverse quantisation (C reference version).
 * NOTE(review): signature line elided in this extract. (2*level + 1) *
 * qscale * matrix >> 4 per coefficient, no oddification; ends with the
 * same block[63] mismatch-control toggle as the intra bit-exact variant. */
 249  int16_t *block, int n, int qscale)
 250 {
 251  int i, level, nCoeffs;
 252  const uint16_t *quant_matrix;
 253  int sum=-1; /* -1 so the final toggle fires exactly when the coeff sum is even */
 254 
 255  if(s->alternate_scan) nCoeffs= 63;
 256  else nCoeffs= s->block_last_index[n];
 257 
 258  quant_matrix = s->inter_matrix;
 259  for(i=0; i<=nCoeffs; i++) {
 260  int j= s->intra_scantable.permutated[i];
 261  level = block[j];
 262  if (level) {
 263  if (level < 0) {
 264  level = -level;
 265  level = (((level << 1) + 1) * qscale *
 266  ((int) (quant_matrix[j]))) >> 4;
 267  level = -level;
 268  } else {
 269  level = (((level << 1) + 1) * qscale *
 270  ((int) (quant_matrix[j]))) >> 4;
 271  }
 272  block[j] = level;
 273  sum+=level;
 274  }
 275  }
 276  block[63]^=sum&1; /* mismatch control: make the sum of coefficients odd */
 277 }
278 
/* H.263 intra-block inverse quantisation (C reference version).
 * NOTE(review): signature line elided in this extract. H.263 uses a uniform
 * quantiser: level * 2*qscale +/- qadd, no per-coefficient matrix. With AIC
 * (advanced intra coding) the DC term is left alone and qadd is 0; with
 * AC prediction all 63 AC coefficients must be processed. */
 280  int16_t *block, int n, int qscale)
 281 {
 282  int i, level, qmul, qadd;
 283  int nCoeffs;
 284 
 285  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
 286 
 287  qmul = qscale << 1;
 288 
 289  if (!s->h263_aic) {
 290  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
 291  qadd = (qscale - 1) | 1; /* odd reconstruction offset */
 292  }else{
 293  qadd = 0;
 294  }
 295  if(s->ac_pred)
 296  nCoeffs=63;
 297  else
 298  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 299 
 300  for(i=1; i<=nCoeffs; i++) {
 301  level = block[i];
 302  if (level) {
 303  if (level < 0) {
 304  level = level * qmul - qadd;
 305  } else {
 306  level = level * qmul + qadd;
 307  }
 308  block[i] = level;
 309  }
 310  }
 311 }
312 
/* H.263 inter-block inverse quantisation (C reference version).
 * NOTE(review): signature line elided in this extract. Same uniform
 * quantiser as the intra variant, but no DC special case: coefficient 0
 * is dequantised like any other, starting the loop at i = 0. */
 314  int16_t *block, int n, int qscale)
 315 {
 316  int i, level, qmul, qadd;
 317  int nCoeffs;
 318 
 319  av_assert2(s->block_last_index[n]>=0);
 320 
 321  qadd = (qscale - 1) | 1; /* odd reconstruction offset */
 322  qmul = qscale << 1;
 323 
 324  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 325 
 326  for(i=0; i<=nCoeffs; i++) {
 327  level = block[i];
 328  if (level) {
 329  if (level < 0) {
 330  level = level * qmul - qadd;
 331  } else {
 332  level = level * qmul + qadd;
 333  }
 334  block[i] = level;
 335  }
 336  }
 337 }
338 
/* Error-resilience callback: reconstruct one macroblock during concealment.
 * Copies the ER-supplied MB state (position, motion vectors, intra/skip
 * flags) into the context, recomputes the destination pointers for the
 * current MB, and hands off to ff_MPV_decode_mb().
 * NOTE(review): two lines (Doxygen 353-354) are elided in this extract
 * between the memcpy and clear_blocks; verify against the full source. */
 339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 340  int (*mv)[2][4][2],
 341  int mb_x, int mb_y, int mb_intra, int mb_skipped)
 342 {
 343  MpegEncContext *s = opaque;
 344 
 345  s->mv_dir = mv_dir;
 346  s->mv_type = mv_type;
 347  s->mb_intra = mb_intra;
 348  s->mb_skipped = mb_skipped;
 349  s->mb_x = mb_x;
 350  s->mb_y = mb_y;
 351  memcpy(s->mv, mv, sizeof(*mv));
 352 
 355 
 356  s->bdsp.clear_blocks(s->block[0]);
 357 
 358  s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
 359  s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 360  s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 361 
 362  if (ref)
 363  av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
 364  ff_MPV_decode_mb(s, s->block);
 365 }
366 
/* Debug stub used when motion compensation is disabled (FF_DEBUG_NOMC):
 * instead of copying pixels, paint each of the h rows of a 16-wide block
 * with mid-grey (128). The src argument is deliberately unused. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
372 
/* 8-wide counterpart of gray16: fill each of the h rows of an 8-pixel-wide
 * block with mid-grey (128); src is deliberately ignored. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
378 
379 /* init common dct for both encoder and decoder */
381 {
382  ff_blockdsp_init(&s->bdsp, s->avctx);
383  ff_dsputil_init(&s->dsp, s->avctx);
384  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386  ff_idctdsp_init(&s->idsp, s->avctx);
389 
390  if (s->avctx->debug & FF_DEBUG_NOMC) {
391  int i;
392  for (i=0; i<4; i++) {
393  s->hdsp.avg_pixels_tab[0][i] = gray16;
394  s->hdsp.put_pixels_tab[0][i] = gray16;
395  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
396 
397  s->hdsp.avg_pixels_tab[1][i] = gray8;
398  s->hdsp.put_pixels_tab[1][i] = gray8;
399  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
400  }
401  }
402 
408  if (s->flags & CODEC_FLAG_BITEXACT)
411 
412  if (ARCH_ALPHA)
414  if (ARCH_ARM)
416  if (ARCH_PPC)
418  if (ARCH_X86)
420 
421  /* load & permutate scantables
422  * note: only wmv uses different ones
423  */
424  if (s->alternate_scan) {
427  } else {
430  }
433 
434  return 0;
435 }
436 
/* Allocate linesize-dependent scratch buffers (edge emulation + ME
 * scratchpads) shared by encoder and decoder. Returns 0 on success,
 * AVERROR_PATCHWELCOME for absurdly small linesizes, AVERROR(ENOMEM) on
 * allocation failure (via the FF_ALLOCZ_OR_GOTO fail path).
 * NOTE(review): Doxygen line 441 is elided in this extract — the bare
 * "return 0;" below is presumably guarded by an already-allocated check
 * (e.g. if (s->edge_emu_buffer)); verify against the full source. */
 437 static int frame_size_alloc(MpegEncContext *s, int linesize)
 438 {
 439  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
 440 
 442  return 0;
 443 
 444  if (linesize < 24) {
 445  av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
 446  return AVERROR_PATCHWELCOME;
 447  }
 448 
 449  // edge emu needs blocksize + filter length - 1
 450  // (= 17x17 for halfpel / 21x21 for h264)
 451  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
 452  // at uvlinesize. It supports only YUV420 so 24x24 is enough
 453  // linesize * interlaced * MBsize
 454  // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
 455  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
 456  fail);
 457 
 458  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
 459  fail)
 460  s->me.temp = s->me.scratchpad; /* the scratchpad aliases serve different passes, never concurrently */
 461  s->rd_scratchpad = s->me.scratchpad;
 462  s->b_scratchpad = s->me.scratchpad;
 463  s->obmc_scratchpad = s->me.scratchpad + 16;
 464 
 465  return 0;
 466 fail:
 468  return AVERROR(ENOMEM);
 469 }
470 
471 /**
472  * Allocate a frame buffer
473  */
475 {
476  int edges_needed = av_codec_is_encoder(s->avctx->codec);
477  int r, ret;
478 
479  pic->tf.f = pic->f;
480  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
482  s->codec_id != AV_CODEC_ID_MSS2) {
483  if (edges_needed) {
484  pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
485  pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
486  }
487 
488  r = ff_thread_get_buffer(s->avctx, &pic->tf,
489  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
490  } else {
491  pic->f->width = s->avctx->width;
492  pic->f->height = s->avctx->height;
493  pic->f->format = s->avctx->pix_fmt;
494  r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
495  }
496 
497  if (r < 0 || !pic->f->buf[0]) {
498  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
499  r, pic->f->data[0]);
500  return -1;
501  }
502 
503  if (edges_needed) {
504  int i;
505  for (i = 0; pic->f->data[i]; i++) {
506  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
507  pic->f->linesize[i] +
508  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
509  pic->f->data[i] += offset;
510  }
511  pic->f->width = s->avctx->width;
512  pic->f->height = s->avctx->height;
513  }
514 
515  if (s->avctx->hwaccel) {
516  assert(!pic->hwaccel_picture_private);
519  if (!pic->hwaccel_priv_buf) {
520  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
521  return -1;
522  }
524  }
525  }
526 
527  if (s->linesize && (s->linesize != pic->f->linesize[0] ||
528  s->uvlinesize != pic->f->linesize[1])) {
530  "get_buffer() failed (stride changed)\n");
531  ff_mpeg_unref_picture(s, pic);
532  return -1;
533  }
534 
535  if (pic->f->linesize[1] != pic->f->linesize[2]) {
537  "get_buffer() failed (uv stride mismatch)\n");
538  ff_mpeg_unref_picture(s, pic);
539  return -1;
540  }
541 
542  if (!s->edge_emu_buffer &&
543  (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
545  "get_buffer() failed to allocate context scratch buffers.\n");
546  ff_mpeg_unref_picture(s, pic);
547  return ret;
548  }
549 
550  return 0;
551 }
552 
554 {
555  int i;
556 
557  pic->alloc_mb_width =
558  pic->alloc_mb_height = 0;
559 
566 
567  for (i = 0; i < 2; i++) {
569  av_buffer_unref(&pic->ref_index_buf[i]);
570  }
571 }
572 
574 {
575  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
576  const int mb_array_size = s->mb_stride * s->mb_height;
577  const int b8_array_size = s->b8_stride * s->mb_height * 2;
578  int i;
579 
580 
581  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
582  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
583  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
584  sizeof(uint32_t));
585  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
586  return AVERROR(ENOMEM);
587 
588  if (s->encoding) {
589  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
590  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
591  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
592  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
593  return AVERROR(ENOMEM);
594  }
595 
596  if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
597  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
598  int ref_index_size = 4 * mb_array_size;
599 
600  for (i = 0; mv_size && i < 2; i++) {
601  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
602  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
603  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
604  return AVERROR(ENOMEM);
605  }
606  }
607 
608  pic->alloc_mb_width = s->mb_width;
609  pic->alloc_mb_height = s->mb_height;
610 
611  return 0;
612 }
613 
615 {
616  int ret, i;
617 #define MAKE_WRITABLE(table) \
618 do {\
619  if (pic->table &&\
620  (ret = av_buffer_make_writable(&pic->table)) < 0)\
621  return ret;\
622 } while (0)
623 
624  MAKE_WRITABLE(mb_var_buf);
625  MAKE_WRITABLE(mc_mb_var_buf);
626  MAKE_WRITABLE(mb_mean_buf);
627  MAKE_WRITABLE(mbskip_table_buf);
628  MAKE_WRITABLE(qscale_table_buf);
629  MAKE_WRITABLE(mb_type_buf);
630 
631  for (i = 0; i < 2; i++) {
632  MAKE_WRITABLE(motion_val_buf[i]);
633  MAKE_WRITABLE(ref_index_buf[i]);
634  }
635 
636  return 0;
637 }
638 
639 /**
640  * Allocate a Picture.
641  * The pixels are allocated/set by calling get_buffer() if shared = 0
642  */
643 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
644 {
645  int i, ret;
646 
647  if (pic->qscale_table_buf)
648  if ( pic->alloc_mb_width != s->mb_width
649  || pic->alloc_mb_height != s->mb_height)
651 
652  if (shared) {
653  av_assert0(pic->f->data[0]);
654  pic->shared = 1;
655  } else {
656  av_assert0(!pic->f->buf[0]);
657 
658  if (alloc_frame_buffer(s, pic) < 0)
659  return -1;
660 
661  s->linesize = pic->f->linesize[0];
662  s->uvlinesize = pic->f->linesize[1];
663  }
664 
665  if (!pic->qscale_table_buf)
666  ret = alloc_picture_tables(s, pic);
667  else
668  ret = make_tables_writable(pic);
669  if (ret < 0)
670  goto fail;
671 
672  if (s->encoding) {
673  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
674  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
675  pic->mb_mean = pic->mb_mean_buf->data;
676  }
677 
678  pic->mbskip_table = pic->mbskip_table_buf->data;
679  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
680  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
681 
682  if (pic->motion_val_buf[0]) {
683  for (i = 0; i < 2; i++) {
684  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
685  pic->ref_index[i] = pic->ref_index_buf[i]->data;
686  }
687  }
688 
689  return 0;
690 fail:
691  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
692  ff_mpeg_unref_picture(s, pic);
694  return AVERROR(ENOMEM);
695 }
696 
697 /**
698  * Deallocate a picture.
699  */
701 {
702  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
703 
704  pic->tf.f = pic->f;
705  /* WM Image / Screen codecs allocate internal buffers with different
706  * dimensions / colorspaces; ignore user-defined callbacks for these. */
707  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
710  ff_thread_release_buffer(s->avctx, &pic->tf);
711  else if (pic->f)
712  av_frame_unref(pic->f);
713 
715 
716  if (pic->needs_realloc)
718 
719  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
720 }
721 
723 {
724  int i;
725 
726 #define UPDATE_TABLE(table)\
727 do {\
728  if (src->table &&\
729  (!dst->table || dst->table->buffer != src->table->buffer)) {\
730  av_buffer_unref(&dst->table);\
731  dst->table = av_buffer_ref(src->table);\
732  if (!dst->table) {\
733  ff_free_picture_tables(dst);\
734  return AVERROR(ENOMEM);\
735  }\
736  }\
737 } while (0)
738 
739  UPDATE_TABLE(mb_var_buf);
740  UPDATE_TABLE(mc_mb_var_buf);
741  UPDATE_TABLE(mb_mean_buf);
742  UPDATE_TABLE(mbskip_table_buf);
743  UPDATE_TABLE(qscale_table_buf);
744  UPDATE_TABLE(mb_type_buf);
745  for (i = 0; i < 2; i++) {
746  UPDATE_TABLE(motion_val_buf[i]);
747  UPDATE_TABLE(ref_index_buf[i]);
748  }
749 
750  dst->mb_var = src->mb_var;
751  dst->mc_mb_var = src->mc_mb_var;
752  dst->mb_mean = src->mb_mean;
753  dst->mbskip_table = src->mbskip_table;
754  dst->qscale_table = src->qscale_table;
755  dst->mb_type = src->mb_type;
756  for (i = 0; i < 2; i++) {
757  dst->motion_val[i] = src->motion_val[i];
758  dst->ref_index[i] = src->ref_index[i];
759  }
760 
761  dst->alloc_mb_width = src->alloc_mb_width;
762  dst->alloc_mb_height = src->alloc_mb_height;
763 
764  return 0;
765 }
766 
768 {
769  int ret;
770 
771  av_assert0(!dst->f->buf[0]);
772  av_assert0(src->f->buf[0]);
773 
774  src->tf.f = src->f;
775  dst->tf.f = dst->f;
776  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
777  if (ret < 0)
778  goto fail;
779 
780  ret = update_picture_tables(dst, src);
781  if (ret < 0)
782  goto fail;
783 
784  if (src->hwaccel_picture_private) {
786  if (!dst->hwaccel_priv_buf)
787  goto fail;
789  }
790 
791  dst->field_picture = src->field_picture;
792  dst->mb_var_sum = src->mb_var_sum;
793  dst->mc_mb_var_sum = src->mc_mb_var_sum;
794  dst->b_frame_score = src->b_frame_score;
795  dst->needs_realloc = src->needs_realloc;
796  dst->reference = src->reference;
797  dst->shared = src->shared;
798 
799  return 0;
800 fail:
801  ff_mpeg_unref_picture(s, dst);
802  return ret;
803 }
804 
/* Swap the U and V block pointers (pblocks[4] <-> pblocks[5]); used for the
 * "VCR2" codec tag which stores chroma planes in the opposite order.
 * NOTE(review): the signature line is elided in this extract; callers pass
 * an MpegEncContext* (see exchange_uv(s) / exchange_uv(dst) below). */
 806 {
 807  int16_t (*tmp)[64];
 808 
 809  tmp = s->pblocks[4];
 810  s->pblocks[4] = s->pblocks[5];
 811  s->pblocks[5] = tmp;
 812 }
813 
/* Per-slice-context initialisation: block/pblock arrays, encoder ME maps,
 * and (for FMT_H263) the AC prediction value tables. Scratch buffers are
 * only NULLed here; their real allocation is deferred to frame_size_alloc()
 * once the linesize is known. Returns 0 on success, -1 on allocation
 * failure (cleanup happens later in ff_MPV_common_end()).
 * NOTE(review): signature line and several FF_ALLOCZ_OR_GOTO opening lines
 * (Doxygen 834, 837, 852) are elided in this extract — the orphaned
 * continuation lines below belong to score_map / dct_error_sum /
 * ac_val_base allocations; verify against the full source. */
 815 {
 816  int y_size = s->b8_stride * (2 * s->mb_height + 1);
 817  int c_size = s->mb_stride * (s->mb_height + 1);
 818  int yc_size = y_size + 2 * c_size;
 819  int i;
 820 
 821  if (s->mb_height & 1)
 822  yc_size += 2*s->b8_stride + 2*s->mb_stride;
 823 
 824  s->edge_emu_buffer =
 825  s->me.scratchpad =
 826  s->me.temp =
 827  s->rd_scratchpad =
 828  s->b_scratchpad =
 829  s->obmc_scratchpad = NULL;
 830 
 831  if (s->encoding) {
 832  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
 833  ME_MAP_SIZE * sizeof(uint32_t), fail)
 835  ME_MAP_SIZE * sizeof(uint32_t), fail)
 836  if (s->avctx->noise_reduction) {
 838  2 * 64 * sizeof(int), fail)
 839  }
 840  }
 841  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
 842  s->block = s->blocks[0];
 843 
 844  for (i = 0; i < 12; i++) {
 845  s->pblocks[i] = &s->block[i];
 846  }
 847  if (s->avctx->codec_tag == AV_RL32("VCR2"))
 848  exchange_uv(s); /* VCR2 stores U/V swapped */
 849 
 850  if (s->out_format == FMT_H263) {
 851  /* ac values */
 853  yc_size * sizeof(int16_t) * 16, fail);
 854  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
 855  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
 856  s->ac_val[2] = s->ac_val[1] + c_size;
 857  }
 858 
 859  return 0;
 860 fail:
 861  return -1; // free() through ff_MPV_common_end()
 862 }
863 
/* Free everything init_duplicate_context()/frame_size_alloc() allocated for
 * one slice context, and NULL the aliases into me.scratchpad so no dangling
 * pointers remain. Safe to call with s == NULL.
 * NOTE(review): signature line and Doxygen line 869 (presumably
 * av_freep(&s->edge_emu_buffer)) are elided in this extract. */
 865 {
 866  if (s == NULL)
 867  return;
 868 
 870  av_freep(&s->me.scratchpad);
 871  s->me.temp =
 872  s->rd_scratchpad =
 873  s->b_scratchpad =
 874  s->obmc_scratchpad = NULL; /* these aliased me.scratchpad; clear, don't double-free */
 875 
 876  av_freep(&s->dct_error_sum);
 877  av_freep(&s->me.map);
 878  av_freep(&s->me.score_map);
 879  av_freep(&s->blocks);
 880  av_freep(&s->ac_val_base);
 881  s->block = NULL;
 882 }
883 
/* Copy the per-thread ("duplicate") fields from src into bak so that
 * ff_update_duplicate_context() can overwrite a context wholesale and then
 * restore these thread-local pointers/counters.
 * NOTE(review): signature line elided in this extract; per the call sites
 * below it takes (MpegEncContext *bak, MpegEncContext *src). */
 885 {
 886 #define COPY(a) bak->a = src->a
 887  COPY(edge_emu_buffer);
 888  COPY(me.scratchpad);
 889  COPY(me.temp);
 890  COPY(rd_scratchpad);
 891  COPY(b_scratchpad);
 892  COPY(obmc_scratchpad);
 893  COPY(me.map);
 894  COPY(me.score_map);
 895  COPY(blocks);
 896  COPY(block);
 897  COPY(start_mb_y);
 898  COPY(end_mb_y);
 899  COPY(me.map_generation);
 900  COPY(pb);
 901  COPY(dct_error_sum);
 902  COPY(dct_count[0]);
 903  COPY(dct_count[1]);
 904  COPY(ac_val_base);
 905  COPY(ac_val[0]);
 906  COPY(ac_val[1]);
 907  COPY(ac_val[2]);
 908 #undef COPY
 909 }
910 
/* Refresh a slice-thread context from the master: memcpy the whole struct,
 * then restore dst's own scratch pointers (saved via
 * backup_duplicate_context), rebuild pblocks into dst's block array, and
 * lazily allocate scratch buffers if dst has none yet. Returns 0 or a
 * negative AVERROR.
 * NOTE(review): signature line elided in this extract; presumably
 * (MpegEncContext *dst, MpegEncContext *src). */
 912 {
 913  MpegEncContext bak;
 914  int i, ret;
 915  // FIXME copy only needed parts
 916  // START_TIMER
 917  backup_duplicate_context(&bak, dst); /* save dst's thread-local pointers */
 918  memcpy(dst, src, sizeof(MpegEncContext));
 919  backup_duplicate_context(dst, &bak); /* restore them after the wholesale copy */
 920  for (i = 0; i < 12; i++) {
 921  dst->pblocks[i] = &dst->block[i]; /* must point into dst, not src */
 922  }
 923  if (dst->avctx->codec_tag == AV_RL32("VCR2"))
 924  exchange_uv(dst);
 925  if (!dst->edge_emu_buffer &&
 926  (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
 927  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
 928  "scratch buffers.\n");
 929  return ret;
 930  }
 931  // STOP_TIMER("update_duplicate_context")
 932  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
 933  return 0;
 934 }
935 
937  const AVCodecContext *src)
938 {
939  int i, ret;
940  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
941 
942  if (dst == src)
943  return 0;
944 
945  av_assert0(s != s1);
946 
947  // FIXME can parameters change on I-frames?
948  // in that case dst may need a reinit
949  if (!s->context_initialized) {
950  memcpy(s, s1, sizeof(MpegEncContext));
951 
952  s->avctx = dst;
953  s->bitstream_buffer = NULL;
955 
956  if (s1->context_initialized){
957 // s->picture_range_start += MAX_PICTURE_COUNT;
958 // s->picture_range_end += MAX_PICTURE_COUNT;
959  if((ret = ff_MPV_common_init(s)) < 0){
960  memset(s, 0, sizeof(MpegEncContext));
961  s->avctx = dst;
962  return ret;
963  }
964  }
965  }
966 
967  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
968  s->context_reinit = 0;
969  s->height = s1->height;
970  s->width = s1->width;
971  if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
972  return ret;
973  }
974 
975  s->avctx->coded_height = s1->avctx->coded_height;
976  s->avctx->coded_width = s1->avctx->coded_width;
977  s->avctx->width = s1->avctx->width;
978  s->avctx->height = s1->avctx->height;
979 
980  s->coded_picture_number = s1->coded_picture_number;
981  s->picture_number = s1->picture_number;
982 
983  av_assert0(!s->picture || s->picture != s1->picture);
984  if(s->picture)
985  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
986  ff_mpeg_unref_picture(s, &s->picture[i]);
987  if (s1->picture[i].f->buf[0] &&
988  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
989  return ret;
990  }
991 
992 #define UPDATE_PICTURE(pic)\
993 do {\
994  ff_mpeg_unref_picture(s, &s->pic);\
995  if (s1->pic.f && s1->pic.f->buf[0])\
996  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
997  else\
998  ret = update_picture_tables(&s->pic, &s1->pic);\
999  if (ret < 0)\
1000  return ret;\
1001 } while (0)
1002 
1003  UPDATE_PICTURE(current_picture);
1004  UPDATE_PICTURE(last_picture);
1005  UPDATE_PICTURE(next_picture);
1006 
1007  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1008  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1009  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1010 
1011  // Error/bug resilience
1012  s->next_p_frame_damaged = s1->next_p_frame_damaged;
1013  s->workaround_bugs = s1->workaround_bugs;
1014  s->padding_bug_score = s1->padding_bug_score;
1015 
1016  // MPEG4 timing info
1017  memcpy(&s->last_time_base, &s1->last_time_base,
1018  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1019  (char *) &s1->last_time_base);
1020 
1021  // B-frame info
1022  s->max_b_frames = s1->max_b_frames;
1023  s->low_delay = s1->low_delay;
1024  s->droppable = s1->droppable;
1025 
1026  // DivX handling (doesn't work)
1027  s->divx_packed = s1->divx_packed;
1028 
1029  if (s1->bitstream_buffer) {
1030  if (s1->bitstream_buffer_size +
1034  s1->allocated_bitstream_buffer_size);
1035  s->bitstream_buffer_size = s1->bitstream_buffer_size;
1036  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1037  s1->bitstream_buffer_size);
1038  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1040  }
1041 
1042  // linesize dependend scratch buffer allocation
1043  if (!s->edge_emu_buffer)
1044  if (s1->linesize) {
1045  if (frame_size_alloc(s, s1->linesize) < 0) {
1046  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1047  "scratch buffers.\n");
1048  return AVERROR(ENOMEM);
1049  }
1050  } else {
1051  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1052  "be allocated due to unknown size.\n");
1053  }
1054 
1055  // MPEG2/interlacing info
1056  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1057  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1058 
1059  if (!s1->first_field) {
1060  s->last_pict_type = s1->pict_type;
1061  if (s1->current_picture_ptr)
1062  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1063  }
1064 
1065  return 0;
1066 }
1067 
1068 /**
1069  * Set the given MpegEncContext to common defaults
1070  * (same for encoding and decoding).
1071  * The changed fields will not depend upon the
1072  * prior state of the MpegEncContext.
1073  */
/* Reset an MpegEncContext to defaults shared by encoder and decoder
 * (see the doc comment above): progressive flags, picture counters,
 * f/b codes and slice-context count.
 * NOTE(review): Doxygen lines 1077-1078 and 1081 are elided in this
 * extract — the y_dc_scale_table chained assignment below is missing its
 * right-hand side (presumably the default DC scale tables); verify
 * against the full source. */
 1075 {
 1076  s->y_dc_scale_table =
 1079  s->progressive_frame = 1;
 1080  s->progressive_sequence = 1;
 1082 
 1083  s->coded_picture_number = 0;
 1084  s->picture_number = 0;
 1085 
 1086  s->f_code = 1;
 1087  s->b_code = 1;
 1088 
 1089  s->slice_context_count = 1;
 1090 }
1091 
1092 /**
1093  * Set the given MpegEncContext to defaults for decoding.
1094  * the changed fields will not depend upon
1095  * the prior state of the MpegEncContext.
1096  */
1098 {
1100 }
1101 
1103 {
1104  ERContext *er = &s->er;
1105  int mb_array_size = s->mb_height * s->mb_stride;
1106  int i;
1107 
1108  er->avctx = s->avctx;
1109  er->dsp = &s->dsp;
1110 
1111  er->mb_index2xy = s->mb_index2xy;
1112  er->mb_num = s->mb_num;
1113  er->mb_width = s->mb_width;
1114  er->mb_height = s->mb_height;
1115  er->mb_stride = s->mb_stride;
1116  er->b8_stride = s->b8_stride;
1117 
1119  er->error_status_table = av_mallocz(mb_array_size);
1120  if (!er->er_temp_buffer || !er->error_status_table)
1121  goto fail;
1122 
1123  er->mbskip_table = s->mbskip_table;
1124  er->mbintra_table = s->mbintra_table;
1125 
1126  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1127  er->dc_val[i] = s->dc_val[i];
1128 
1130  er->opaque = s;
1131 
1132  return 0;
1133 fail:
1134  av_freep(&er->er_temp_buffer);
1136  return AVERROR(ENOMEM);
1137 }
1138 
1139 /**
1140  * Initialize and allocates MpegEncContext fields dependent on the resolution.
1141  */
/* NOTE(review): the signature line (doxygen line 1142) was lost in extraction;
 * presumably "static int init_context_frame(MpegEncContext *s)" — confirm
 * against upstream. Several interior lines are also missing (gaps in the
 * residual numbering below). Returns 0 on success, AVERROR(ENOMEM) on
 * allocation failure. */
1143 {
1144  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1145 
1146  s->mb_width = (s->width + 15) / 16;
1147  s->mb_stride = s->mb_width + 1;
1148  s->b8_stride = s->mb_width * 2 + 1;
1149  mb_array_size = s->mb_height * s->mb_stride;
1150  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1151 
1152  /* set default edge pos, will be overridden
1153  * in decode_header if needed */
1154  s->h_edge_pos = s->mb_width * 16;
1155  s->v_edge_pos = s->mb_height * 16;
1156 
1157  s->mb_num = s->mb_width * s->mb_height;
1158 
1159  s->block_wrap[0] =
1160  s->block_wrap[1] =
1161  s->block_wrap[2] =
1162  s->block_wrap[3] = s->b8_stride;
1163  s->block_wrap[4] =
1164  s->block_wrap[5] = s->mb_stride;
1165 
1166  y_size = s->b8_stride * (2 * s->mb_height + 1);
1167  c_size = s->mb_stride * (s->mb_height + 1);
1168  yc_size = y_size + 2 * c_size;
1169 
1170  if (s->mb_height & 1)
1171  yc_size += 2*s->b8_stride + 2*s->mb_stride;
1172 
1173  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1174  for (y = 0; y < s->mb_height; y++)
1175  for (x = 0; x < s->mb_width; x++)
1176  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1177 
1178  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1179 
1180  if (s->encoding) {
1181  /* Allocate MV tables */
1182  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1183  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1184  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1185  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1186  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1187  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1188  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
/* NOTE(review): lines 1189-1193 (the remaining b_*_mv_table pointer
 * offsets mirroring the line above) are missing from this extraction. */
1194 
1195  /* Allocate MB type table */
1196  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1197 
1198  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1199 
/* NOTE(review): the FF_ALLOCZ_OR_GOTO lines for cplx_tab/bits_tab (1200,
 * 1202) are missing; only their size arguments survived below. */
1201  mb_array_size * sizeof(float), fail);
1203  mb_array_size * sizeof(float), fail);
1204 
1205  }
1206 
1207  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
/* NOTE(review): line 1208 (second half of the condition) is missing. */
1209  /* interlaced direct mode decoding tables */
1210  for (i = 0; i < 2; i++) {
1211  int j, k;
1212  for (j = 0; j < 2; j++) {
1213  for (k = 0; k < 2; k++) {
1215  s->b_field_mv_table_base[i][j][k],
1216  mv_table_size * 2 * sizeof(int16_t),
1217  fail);
1218  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1219  s->mb_stride + 1;
1220  }
1221  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1222  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1223  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1224  }
1225  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1226  }
1227  }
1228  if (s->out_format == FMT_H263) {
1229  /* cbp values */
1230  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1231  s->coded_block = s->coded_block_base + s->b8_stride + 1;
1232 
1233  /* cbp, ac_pred, pred_dir */
1234  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1235  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1236  }
1237 
1238  if (s->h263_pred || s->h263_plus || !s->encoding) {
1239  /* dc values */
1240  // MN: we need these for error resilience of intra-frames
1241  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1242  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1243  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1244  s->dc_val[2] = s->dc_val[1] + c_size;
1245  for (i = 0; i < yc_size; i++)
1246  s->dc_val_base[i] = 1024;
1247  }
1248 
1249  /* which mb is a intra block */
1250  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1251  memset(s->mbintra_table, 1, mb_array_size);
1252 
1253  /* init macroblock skip table */
1254  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1255  // Note the + 1 is for a quicker mpeg4 slice_end detection
1256 
1257  return init_er(s);
1258 fail:
1259  return AVERROR(ENOMEM);
1260 }
1261 
1262 /**
1263  * init common structure for both encoder and decoder.
1264  * this assumes that some variables like width/height are already set
1265  */
/* NOTE(review): the signature line (1266) was lost in extraction; presumably
 * "av_cold int ff_MPV_common_init(MpegEncContext *s)" — confirm upstream.
 * Interior lines are also missing where the residual numbering jumps. */
1267 {
1268  int i;
1269  int nb_slices = (HAVE_THREADS &&
1271  s->avctx->thread_count : 1;
1272 
1273  if (s->encoding && s->avctx->slices)
1274  nb_slices = s->avctx->slices;
1275 
1277  s->mb_height = (s->height + 31) / 32 * 2;
1278  else
1279  s->mb_height = (s->height + 15) / 16;
1280 
1281  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1283  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1284  return -1;
1285  }
1286 
1287  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1288  int max_slices;
1289  if (s->mb_height)
1290  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1291  else
1292  max_slices = MAX_THREADS;
1293  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1294  " reducing to %d\n", nb_slices, max_slices);
1295  nb_slices = max_slices;
1296  }
1297 
1298  if ((s->width || s->height) &&
1299  av_image_check_size(s->width, s->height, 0, s->avctx))
1300  return -1;
1301 
1302  ff_dct_common_init(s);
1303 
1304  s->flags = s->avctx->flags;
1305  s->flags2 = s->avctx->flags2;
1306 
1307  /* set chroma shifts */
1309  &s->chroma_x_shift,
1310  &s->chroma_y_shift);
1311 
1312  /* convert fourcc to upper case */
1314 
1316 
1318  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1319  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1320  s->picture[i].f = av_frame_alloc();
1321  if (!s->picture[i].f)
1322  goto fail;
1323  }
1324  memset(&s->next_picture, 0, sizeof(s->next_picture));
1325  memset(&s->last_picture, 0, sizeof(s->last_picture));
1326  memset(&s->current_picture, 0, sizeof(s->current_picture));
1327  memset(&s->new_picture, 0, sizeof(s->new_picture));
1328  s->next_picture.f = av_frame_alloc();
1329  if (!s->next_picture.f)
1330  goto fail;
1331  s->last_picture.f = av_frame_alloc();
1332  if (!s->last_picture.f)
1333  goto fail;
1335  if (!s->current_picture.f)
1336  goto fail;
1337  s->new_picture.f = av_frame_alloc();
1338  if (!s->new_picture.f)
1339  goto fail;
1340 
1341  if (init_context_frame(s))
1342  goto fail;
1343 
1344  s->parse_context.state = -1;
1345 
1346  s->context_initialized = 1;
1347  s->thread_context[0] = s;
1348 
1349 // if (s->width && s->height) {
1350  if (nb_slices > 1) {
1351  for (i = 1; i < nb_slices; i++) {
  /* NOTE(review): av_malloc result is not NULL-checked before the
   * memcpy below — a failed allocation here is a crash, not a clean
   * error path. Flagging only; fixing requires touching missing code. */
1352  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1353  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1354  }
1355 
1356  for (i = 0; i < nb_slices; i++) {
1357  if (init_duplicate_context(s->thread_context[i]) < 0)
1358  goto fail;
  /* each slice context gets an even share of macroblock rows */
1359  s->thread_context[i]->start_mb_y =
1360  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1361  s->thread_context[i]->end_mb_y =
1362  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1363  }
1364  } else {
1365  if (init_duplicate_context(s) < 0)
1366  goto fail;
1367  s->start_mb_y = 0;
1368  s->end_mb_y = s->mb_height;
1369  }
1370  s->slice_context_count = nb_slices;
1371 // }
1372 
1373  return 0;
1374  fail:
1375  ff_MPV_common_end(s);
1376  return -1;
1377 }
1378 
1379 /**
1380  * Frees and resets MpegEncContext fields depending on the resolution.
1381  * Is used during resolution changes to avoid a full reinitialization of the
1382  * codec.
1383  */
/* NOTE(review): signature line (1384) lost in extraction — presumably
 * "static int free_context_frame(MpegEncContext *s)", the inverse of
 * init_context_frame above. Lines 1389-1394 (av_freep of the mv table
 * bases) and a few others are missing below. Always returns 0. */
1385 {
1386  int i, j, k;
1387 
1388  av_freep(&s->mb_type);
1395  s->p_mv_table = NULL;
1396  s->b_forw_mv_table = NULL;
1397  s->b_back_mv_table = NULL;
1398  s->b_bidir_forw_mv_table = NULL;
1399  s->b_bidir_back_mv_table = NULL;
1400  s->b_direct_mv_table = NULL;
1401  for (i = 0; i < 2; i++) {
1402  for (j = 0; j < 2; j++) {
1403  for (k = 0; k < 2; k++) {
1404  av_freep(&s->b_field_mv_table_base[i][j][k]);
1405  s->b_field_mv_table[i][j][k] = NULL;
1406  }
1407  av_freep(&s->b_field_select_table[i][j]);
1408  av_freep(&s->p_field_mv_table_base[i][j]);
1409  s->p_field_mv_table[i][j] = NULL;
1410  }
1412  }
1413 
1414  av_freep(&s->dc_val_base);
1416  av_freep(&s->mbintra_table);
1417  av_freep(&s->cbp_table);
1418  av_freep(&s->pred_dir_table);
1419 
1420  av_freep(&s->mbskip_table);
1421 
1423  av_freep(&s->er.er_temp_buffer);
1424  av_freep(&s->mb_index2xy);
1425  av_freep(&s->lambda_table);
1426 
1427  av_freep(&s->cplx_tab);
1428  av_freep(&s->bits_tab);
1429 
1430  s->linesize = s->uvlinesize = 0;
1431 
1432  return 0;
1433 }
1434 
/* NOTE(review): signature line (1435) lost in extraction — presumably
 * "int ff_MPV_common_frame_size_change(MpegEncContext *s)": tear down and
 * rebuild the resolution-dependent state after a mid-stream size change,
 * without a full codec reinit. Several interior lines are missing where
 * the residual numbering jumps. */
1436 {
1437  int i, err = 0;
1438 
1439  if (s->slice_context_count > 1) {
1440  for (i = 0; i < s->slice_context_count; i++) {
1442  }
1443  for (i = 1; i < s->slice_context_count; i++) {
1444  av_freep(&s->thread_context[i]);
1445  }
1446  } else
1448 
1449  if ((err = free_context_frame(s)) < 0)
1450  return err;
1451 
 /* force reallocation of all picture buffers at their next use */
1452  if (s->picture)
1453  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1454  s->picture[i].needs_realloc = 1;
1455  }
1456 
1457  s->last_picture_ptr =
1458  s->next_picture_ptr =
1459  s->current_picture_ptr = NULL;
1460 
1461  // init
1463  s->mb_height = (s->height + 31) / 32 * 2;
1464  else
1465  s->mb_height = (s->height + 15) / 16;
1466 
1467  if ((s->width || s->height) &&
1468  av_image_check_size(s->width, s->height, 0, s->avctx))
1469  return AVERROR_INVALIDDATA;
1470 
1471  if ((err = init_context_frame(s)))
1472  goto fail;
1473 
1474  s->thread_context[0] = s;
1475 
1476  if (s->width && s->height) {
1477  int nb_slices = s->slice_context_count;
1478  if (nb_slices > 1) {
1479  for (i = 1; i < nb_slices; i++) {
    /* NOTE(review): av_malloc unchecked before memcpy — same
     * latent OOM crash as in the init path. */
1480  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1481  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1482  }
1483 
1484  for (i = 0; i < nb_slices; i++) {
1485  if (init_duplicate_context(s->thread_context[i]) < 0)
1486  goto fail;
1487  s->thread_context[i]->start_mb_y =
1488  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1489  s->thread_context[i]->end_mb_y =
1490  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1491  }
1492  } else {
1493  err = init_duplicate_context(s);
1494  if (err < 0)
1495  goto fail;
1496  s->start_mb_y = 0;
1497  s->end_mb_y = s->mb_height;
1498  }
1499  s->slice_context_count = nb_slices;
1500  }
1501 
1502  return 0;
1503  fail:
1504  ff_MPV_common_end(s);
1505  return err;
1506 }
1507 
1508 /* free the common structure for both encoder and decoder */
/* NOTE(review): signature line (1509) lost in extraction — presumably
 * "av_cold void ff_MPV_common_end(MpegEncContext *s)". Interior lines
 * (e.g. 1515, 1523, 1525-1527, 1531, 1537-1548) are also missing. */
1510 {
1511  int i;
1512 
1513  if (s->slice_context_count > 1) {
1514  for (i = 0; i < s->slice_context_count; i++) {
1516  }
1517  for (i = 1; i < s->slice_context_count; i++) {
1518  av_freep(&s->thread_context[i]);
1519  }
1520  s->slice_context_count = 1;
1521  } else free_duplicate_context(s);
1522 
1524  s->parse_context.buffer_size = 0;
1525 
1528 
1529  if (s->picture) {
1530  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1532  ff_mpeg_unref_picture(s, &s->picture[i]);
1533  av_frame_free(&s->picture[i].f);
1534  }
1535  }
1536  av_freep(&s->picture);
1549 
1550  free_context_frame(s);
1551 
1552  s->context_initialized = 0;
 /* clear dangling pointers so a subsequent init starts from a clean state */
1553  s->last_picture_ptr =
1554  s->next_picture_ptr =
1555  s->current_picture_ptr = NULL;
1556  s->linesize = s->uvlinesize = 0;
1557 }
1558 
/* NOTE(review): the first signature line (1559) was lost in extraction —
 * presumably "av_cold void ff_init_rl(RLTable *rl," continuing below.
 * Builds the max_level[], max_run[] and index_run[] lookup tables from the
 * raw run/level tables, in two passes: entries before rl->last (not-last
 * coefficients) and entries from rl->last to rl->n (last coefficients). */
1560  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1561 {
1562  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1563  uint8_t index_run[MAX_RUN + 1];
1564  int last, run, level, start, end, i;
1565 
1566  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1567  if (static_store && rl->max_level[0])
1568  return;
1569 
1570  /* compute max_level[], max_run[] and index_run[] */
1571  for (last = 0; last < 2; last++) {
1572  if (last == 0) {
1573  start = 0;
1574  end = rl->last;
1575  } else {
1576  start = rl->last;
1577  end = rl->n;
1578  }
1579 
 /* rl->n doubles as the "unset" marker in index_run[] */
1580  memset(max_level, 0, MAX_RUN + 1);
1581  memset(max_run, 0, MAX_LEVEL + 1);
1582  memset(index_run, rl->n, MAX_RUN + 1);
1583  for (i = start; i < end; i++) {
1584  run = rl->table_run[i];
1585  level = rl->table_level[i];
1586  if (index_run[run] == rl->n)
1587  index_run[run] = i;
1588  if (level > max_level[run])
1589  max_level[run] = level;
1590  if (run > max_run[level])
1591  max_run[level] = run;
1592  }
 /* NOTE(review): the av_malloc results below are used unchecked by the
  * following memcpy — an OOM here crashes. Flagging only. */
1593  if (static_store)
1594  rl->max_level[last] = static_store[last];
1595  else
1596  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1597  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1598  if (static_store)
1599  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1600  else
1601  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1602  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1603  if (static_store)
1604  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1605  else
1606  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1607  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1608  }
1609 }
1610 
/* NOTE(review): signature line (1611) lost in extraction — presumably
 * "av_cold void ff_init_vlc_rl(RLTable *rl)". Pre-expands the RL VLC into
 * per-quantizer (q = 0..31) run/level/len tables so the decoder can skip
 * the dequantization multiply at parse time. */
1612 {
1613  int i, q;
1614 
1615  for (q = 0; q < 32; q++) {
 /* standard MPEG-style dequant: level * 2q + ((q-1)|1); q==0 is the
  * pass-through case used for the escape-free path */
1616  int qmul = q * 2;
1617  int qadd = (q - 1) | 1;
1618 
1619  if (q == 0) {
1620  qmul = 1;
1621  qadd = 0;
1622  }
1623  for (i = 0; i < rl->vlc.table_size; i++) {
1624  int code = rl->vlc.table[i][0];
1625  int len = rl->vlc.table[i][1];
1626  int level, run;
1627 
1628  if (len == 0) { // illegal code
1629  run = 66;
1630  level = MAX_LEVEL;
1631  } else if (len < 0) { // more bits needed
1632  run = 0;
1633  level = code;
1634  } else {
1635  if (code == rl->n) { // esc
1636  run = 66;
1637  level = 0;
1638  } else {
1639  run = rl->table_run[code] + 1;
1640  level = rl->table_level[code] * qmul + qadd;
1641  if (code >= rl->last) run += 192;
1642  }
1643  }
1644  rl->rl_vlc[q][i].len = len;
1645  rl->rl_vlc[q][i].level = level;
1646  rl->rl_vlc[q][i].run = run;
1647  }
1648  }
1649 }
1650 
/* NOTE(review): signature line (1651) lost in extraction; a function that
 * unreferences every picture slot not currently marked as a reference
 * frame — confirm the exact name against upstream mpegvideo.c. */
1652 {
1653  int i;
1654 
1655  /* release non reference frames */
1656  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1657  if (!s->picture[i].reference)
1658  ff_mpeg_unref_picture(s, &s->picture[i]);
1659  }
1660 }
1661 
1662 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1663 {
1664  if (pic == s->last_picture_ptr)
1665  return 0;
1666  if (pic->f->buf[0] == NULL)
1667  return 1;
1668  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1669  return 1;
1670  return 0;
1671 }
1672 
/**
 * Find a free slot in s->picture[].
 * For shared pictures only an empty, non-last slot qualifies; otherwise
 * pic_is_unused() decides. Aborts if the pool is exhausted (see comment
 * below for the rationale).
 */
1673 static int find_unused_picture(MpegEncContext *s, int shared)
1674 {
1675  int i;
1676 
1677  if (shared) {
1678  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1679  if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1680  return i;
1681  }
1682  } else {
1683  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1684  if (pic_is_unused(s, &s->picture[i]))
1685  return i;
1686  }
1687  }
1688 
/* NOTE(review): line 1689, the start of the av_log() call whose string
 * argument follows, was lost in extraction. */
1690  "Internal error, picture buffer overflow\n");
1691  /* We could return -1, but the codec would crash trying to draw into a
1692  * non-existing frame anyway. This is safer than waiting for a random crash.
1693  * Also the return of this is never useful, an encoder must only allocate
1694  * as much as allowed in the specification. This has no relationship to how
1695  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1696  * enough for such valid streams).
1697  * Plus, a decoder has to check stream validity and remove frames if too
1698  * many reference frames are around. Waiting for "OOM" is not correct at
1699  * all. Similarly, missing reference frames have to be replaced by
1700  * interpolated/MC frames, anything else is a bug in the codec ...
1701  */
1702  abort();
1703  return -1;
1704 }
1705 
/* NOTE(review): signature line (1706) lost in extraction — presumably
 * "int ff_find_unused_picture(MpegEncContext *s, int shared)". Wraps
 * find_unused_picture() and lazily frees a slot that was flagged
 * needs_realloc before handing it out. */
1707 {
1708  int ret = find_unused_picture(s, shared);
1709 
1710  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1711  if (s->picture[ret].needs_realloc) {
1712  s->picture[ret].needs_realloc = 0;
1713  ff_free_picture_tables(&s->picture[ret]);
1714  ff_mpeg_unref_picture(s, &s->picture[ret]);
1715  }
1716  }
1717  return ret;
1718 }
1719 
1720 static void gray_frame(AVFrame *frame)
1721 {
1722  int i, h_chroma_shift, v_chroma_shift;
1723 
1724  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1725 
1726  for(i=0; i<frame->height; i++)
1727  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1728  for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1729  memset(frame->data[1] + frame->linesize[1]*i,
1730  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1731  memset(frame->data[2] + frame->linesize[2]*i,
1732  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1733  }
1734 }
1735 
1736 /**
1737  * generic function called after decoding
1738  * the header and before a frame is decoded.
1739  */
/* NOTE(review): the signature line (1740) was lost in extraction —
 * presumably "int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext
 * *avctx)". Many interior lines are missing below (gaps in the residual
 * numbering); do not assume the visible lines are contiguous code. */
1741 {
1742  int i, ret;
1743  Picture *pic;
1744  s->mb_skipped = 0;
1745 
1746  if (!ff_thread_can_start_frame(avctx)) {
1747  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1748  return -1;
1749  }
1750 
1751  /* mark & release old frames */
1752  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1754  s->last_picture_ptr->f->buf[0]) {
1756  }
1757 
1758  /* release forgotten pictures */
1759  /* if (mpeg124/h263) */
1760  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1761  if (&s->picture[i] != s->last_picture_ptr &&
1762  &s->picture[i] != s->next_picture_ptr &&
1763  s->picture[i].reference && !s->picture[i].needs_realloc) {
1764  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1765  av_log(avctx, AV_LOG_ERROR,
1766  "releasing zombie picture\n");
1767  ff_mpeg_unref_picture(s, &s->picture[i]);
1768  }
1769  }
1770 
1772 
1774 
 /* pick the slot the current frame will be decoded into */
1775  if (s->current_picture_ptr &&
1776  s->current_picture_ptr->f->buf[0] == NULL) {
1777  // we already have a unused image
1778  // (maybe it was set before reading the header)
1779  pic = s->current_picture_ptr;
1780  } else {
1781  i = ff_find_unused_picture(s, 0);
1782  if (i < 0) {
1783  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1784  return i;
1785  }
1786  pic = &s->picture[i];
1787  }
1788 
1789  pic->reference = 0;
1790  if (!s->droppable) {
1791  if (s->pict_type != AV_PICTURE_TYPE_B)
1792  pic->reference = 3;
1793  }
1794 
1796 
1797  if (ff_alloc_picture(s, pic, 0) < 0)
1798  return -1;
1799 
1800  s->current_picture_ptr = pic;
1801  // FIXME use only the vars from current_pic
1803  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1805  if (s->picture_structure != PICT_FRAME)
1808  }
1812 
1814  // if (s->flags && CODEC_FLAG_QSCALE)
1815  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1817 
1818  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1819  s->current_picture_ptr)) < 0)
1820  return ret;
1821 
1822  if (s->pict_type != AV_PICTURE_TYPE_B) {
1824  if (!s->droppable)
1826  }
1827  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1829  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1830  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1831  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1832  s->pict_type, s->droppable);
1833 
 /* if there is no usable last picture (stream starts on a non-keyframe
  * or on a field picture), synthesize a gray dummy reference */
1834  if ((s->last_picture_ptr == NULL ||
1835  s->last_picture_ptr->f->buf[0] == NULL) &&
1836  (s->pict_type != AV_PICTURE_TYPE_I ||
1837  s->picture_structure != PICT_FRAME)) {
1838  int h_chroma_shift, v_chroma_shift;
1840  &h_chroma_shift, &v_chroma_shift);
1842  av_log(avctx, AV_LOG_DEBUG,
1843  "allocating dummy last picture for B frame\n");
1844  else if (s->pict_type != AV_PICTURE_TYPE_I)
1845  av_log(avctx, AV_LOG_ERROR,
1846  "warning: first frame is no keyframe\n");
1847  else if (s->picture_structure != PICT_FRAME)
1848  av_log(avctx, AV_LOG_DEBUG,
1849  "allocate dummy last picture for field based first keyframe\n");
1850 
1851  /* Allocate a dummy frame */
1852  i = ff_find_unused_picture(s, 0);
1853  if (i < 0) {
1854  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1855  return i;
1856  }
1857  s->last_picture_ptr = &s->picture[i];
1858 
1859  s->last_picture_ptr->reference = 3;
1860  s->last_picture_ptr->f->key_frame = 0;
1862 
1863  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1864  s->last_picture_ptr = NULL;
1865  return -1;
1866  }
1867 
1868  if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1869  for(i=0; i<avctx->height; i++)
1870  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1871  0x80, avctx->width);
1872  for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1873  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1874  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1875  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1876  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1877  }
1878 
1880  for(i=0; i<avctx->height; i++)
1881  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1882  }
1883  }
1884 
1885  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1886  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1887  }
1888  if ((s->next_picture_ptr == NULL ||
1889  s->next_picture_ptr->f->buf[0] == NULL) &&
1890  s->pict_type == AV_PICTURE_TYPE_B) {
1891  /* Allocate a dummy frame */
1892  i = ff_find_unused_picture(s, 0);
1893  if (i < 0) {
1894  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1895  return i;
1896  }
1897  s->next_picture_ptr = &s->picture[i];
1898 
1899  s->next_picture_ptr->reference = 3;
1900  s->next_picture_ptr->f->key_frame = 0;
1902 
1903  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1904  s->next_picture_ptr = NULL;
1905  return -1;
1906  }
1907  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1908  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1909  }
1910 
1911 #if 0 // BUFREF-FIXME
1912  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1913  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1914 #endif
1915  if (s->last_picture_ptr) {
1917  if (s->last_picture_ptr->f->buf[0] &&
1918  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1919  s->last_picture_ptr)) < 0)
1920  return ret;
1921  }
1922  if (s->next_picture_ptr) {
1924  if (s->next_picture_ptr->f->buf[0] &&
1925  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1926  s->next_picture_ptr)) < 0)
1927  return ret;
1928  }
1929 
1931  s->last_picture_ptr->f->buf[0]));
1932 
 /* field pictures: step the planes to the proper field and double the
  * strides so each field is addressed as a half-height frame */
1933  if (s->picture_structure!= PICT_FRAME) {
1934  int i;
1935  for (i = 0; i < 4; i++) {
1937  s->current_picture.f->data[i] +=
1938  s->current_picture.f->linesize[i];
1939  }
1940  s->current_picture.f->linesize[i] *= 2;
1941  s->last_picture.f->linesize[i] *= 2;
1942  s->next_picture.f->linesize[i] *= 2;
1943  }
1944  }
1945 
1946  s->err_recognition = avctx->err_recognition;
1947 
1948  /* set dequantizer, we can't do it during init as
1949  * it might change for mpeg4 and we can't do it in the header
1950  * decode as init is not called for mpeg4 there yet */
1951  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1954  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1957  } else {
1960  }
1961 
1962  if (s->avctx->debug & FF_DEBUG_NOMC) {
1964  }
1965 
1966  return 0;
1967 }
1968 
1969 /* called after a frame has been decoded. */
/* NOTE(review): signature line (1970) lost in extraction — presumably
 * "void ff_MPV_frame_end(MpegEncContext *s)" — and the body of the
 * if-statement (line 1975, presumably a ff_thread_report_progress call on
 * the current picture) is missing. emms_c() resets the x86 FPU/MMX state
 * after MMX code. */
1971 {
1972  emms_c();
1973 
1974  if (s->current_picture.reference)
1976 }
1977 
1978 
/**
 * Clip the segment (*sx,*sy)-(*ex,*ey) horizontally against 0 <= x <= maxx,
 * updating the endpoints in place and interpolating y at each cut.
 * @return 1 if the segment lies entirely outside the range, 0 otherwise
 */
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
{
    /* Order the endpoint pointers so (*sx,*sy) is the leftmost point;
     * swapping pointers has the same effect as the symmetric case. */
    if (*sx > *ex) {
        int *tmp;
        tmp = sx; sx = ex; ex = tmp;
        tmp = sy; sy = ey; ey = tmp;
    }

    if (*sx < 0) {
        if (*ex < 0)
            return 1;                /* entirely left of x = 0 */
        /* move the start to x = 0, interpolating y in 64-bit */
        *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
        *sx = 0;
    }

    if (*ex > maxx) {
        if (*sx > maxx)
            return 1;                /* entirely right of x = maxx */
        /* move the end to x = maxx, interpolating y in 64-bit */
        *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
        *ex = maxx;
    }

    return 0;
}
1999 
2000 
2001 /**
2002  * Draw an anti-aliased line between (sx, sy) and (ex, ey), clipped to the
2002.5  * image; pixel intensities are *added* into the buffer, not overwritten.
2003  * @param w width of the image
2004  * @param h height of the image
2005  * @param stride stride/linesize of the image
2006  * @param color color of the line
2007  */
2008 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2009  int w, int h, int stride, int color)
2010 {
2011  int x, y, fr, f;
2012 
 /* clip against both axes; either call may reject the whole segment */
2013  if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2014  return;
2015  if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2016  return;
2017 
2018  sx = av_clip(sx, 0, w - 1);
2019  sy = av_clip(sy, 0, h - 1);
2020  ex = av_clip(ex, 0, w - 1);
2021  ey = av_clip(ey, 0, h - 1);
2022 
2023  buf[sy * stride + sx] += color;
2024 
 /* step along the major axis; f is the 16.16 fixed-point slope and fr
  * the fractional part used to split the color between two pixels */
2025  if (FFABS(ex - sx) > FFABS(ey - sy)) {
2026  if (sx > ex) {
2027  FFSWAP(int, sx, ex);
2028  FFSWAP(int, sy, ey);
2029  }
2030  buf += sx + sy * stride;
2031  ex -= sx;
2032  f = ((ey - sy) << 16) / ex;
2033  for (x = 0; x <= ex; x++) {
2034  y = (x * f) >> 16;
2035  fr = (x * f) & 0xFFFF;
2036  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2037  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2038  }
2039  } else {
2040  if (sy > ey) {
2041  FFSWAP(int, sx, ex);
2042  FFSWAP(int, sy, ey);
2043  }
2044  buf += sx + sy * stride;
2045  ey -= sy;
2046  if (ey)
2047  f = ((ex - sx) << 16) / ey;
2048  else
2049  f = 0;
2050  for(y= 0; y <= ey; y++){
2051  x = (y*f) >> 16;
2052  fr = (y*f) & 0xFFFF;
2053  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2054  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2055  }
2056  }
2057 }
2058 
/**
 * Draw an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 * @param tail draw the head at the tail end instead
 * @param direction swap the two endpoints before drawing
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color, int tail, int direction)
{
    int vx, vy;

    if (direction) {
        int t;
        t = sx; sx = ex; ex = t;
        t = sy; sy = ey; ey = t;
    }

    /* keep coordinates within a sane margin around the image */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    /* draw a head only when the arrow is longer than 3 pixels */
    if (vx * vx + vy * vy > 3 * 3) {
        /* rotate the direction vector by +/-45 degrees and scale it to
         * length 3 for the two head strokes */
        int rx  = vx + vy;
        int ry  = vy - vx;
        int len = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, len);
        ry = ROUNDED_DIV(ry * 3 << 4, len);

        if (tail) {
            rx = -rx;
            ry = -ry;
        }

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
2103 
2104 /**
2105  * Print debugging info for the given picture.
2106  */
2107 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2108  uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2109  int *low_delay,
2110  int mb_width, int mb_height, int mb_stride, int quarter_sample)
2111 {
2112  if (avctx->hwaccel || !mbtype_table
2114  return;
2115 
2116 
2117  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2118  int x,y;
2119 
2120  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2122  for (y = 0; y < mb_height; y++) {
2123  for (x = 0; x < mb_width; x++) {
2124  if (avctx->debug & FF_DEBUG_SKIP) {
2125  int count = mbskip_table[x + y * mb_stride];
2126  if (count > 9)
2127  count = 9;
2128  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2129  }
2130  if (avctx->debug & FF_DEBUG_QP) {
2131  av_log(avctx, AV_LOG_DEBUG, "%2d",
2132  qscale_table[x + y * mb_stride]);
2133  }
2134  if (avctx->debug & FF_DEBUG_MB_TYPE) {
2135  int mb_type = mbtype_table[x + y * mb_stride];
2136  // Type & MV direction
2137  if (IS_PCM(mb_type))
2138  av_log(avctx, AV_LOG_DEBUG, "P");
2139  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2140  av_log(avctx, AV_LOG_DEBUG, "A");
2141  else if (IS_INTRA4x4(mb_type))
2142  av_log(avctx, AV_LOG_DEBUG, "i");
2143  else if (IS_INTRA16x16(mb_type))
2144  av_log(avctx, AV_LOG_DEBUG, "I");
2145  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2146  av_log(avctx, AV_LOG_DEBUG, "d");
2147  else if (IS_DIRECT(mb_type))
2148  av_log(avctx, AV_LOG_DEBUG, "D");
2149  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2150  av_log(avctx, AV_LOG_DEBUG, "g");
2151  else if (IS_GMC(mb_type))
2152  av_log(avctx, AV_LOG_DEBUG, "G");
2153  else if (IS_SKIP(mb_type))
2154  av_log(avctx, AV_LOG_DEBUG, "S");
2155  else if (!USES_LIST(mb_type, 1))
2156  av_log(avctx, AV_LOG_DEBUG, ">");
2157  else if (!USES_LIST(mb_type, 0))
2158  av_log(avctx, AV_LOG_DEBUG, "<");
2159  else {
2160  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2161  av_log(avctx, AV_LOG_DEBUG, "X");
2162  }
2163 
2164  // segmentation
2165  if (IS_8X8(mb_type))
2166  av_log(avctx, AV_LOG_DEBUG, "+");
2167  else if (IS_16X8(mb_type))
2168  av_log(avctx, AV_LOG_DEBUG, "-");
2169  else if (IS_8X16(mb_type))
2170  av_log(avctx, AV_LOG_DEBUG, "|");
2171  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2172  av_log(avctx, AV_LOG_DEBUG, " ");
2173  else
2174  av_log(avctx, AV_LOG_DEBUG, "?");
2175 
2176 
2177  if (IS_INTERLACED(mb_type))
2178  av_log(avctx, AV_LOG_DEBUG, "=");
2179  else
2180  av_log(avctx, AV_LOG_DEBUG, " ");
2181  }
2182  }
2183  av_log(avctx, AV_LOG_DEBUG, "\n");
2184  }
2185  }
2186 
2187  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2188  (avctx->debug_mv)) {
2189  const int shift = 1 + quarter_sample;
2190  int mb_y;
2191  uint8_t *ptr;
2192  int i;
2193  int h_chroma_shift, v_chroma_shift, block_height;
2194  const int width = avctx->width;
2195  const int height = avctx->height;
2196  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2197  const int mv_stride = (mb_width << mv_sample_log2) +
2198  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2199 
2200  *low_delay = 0; // needed to see the vectors without trashing the buffers
2201 
2202  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2203 
2204  av_frame_make_writable(pict);
2205 
2206  pict->opaque = NULL;
2207  ptr = pict->data[0];
2208  block_height = 16 >> v_chroma_shift;
2209 
2210  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2211  int mb_x;
2212  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2213  const int mb_index = mb_x + mb_y * mb_stride;
2214  if ((avctx->debug_mv) && motion_val[0]) {
2215  int type;
2216  for (type = 0; type < 3; type++) {
2217  int direction = 0;
2218  switch (type) {
2219  case 0:
2220  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2221  (pict->pict_type!= AV_PICTURE_TYPE_P))
2222  continue;
2223  direction = 0;
2224  break;
2225  case 1:
2226  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2227  (pict->pict_type!= AV_PICTURE_TYPE_B))
2228  continue;
2229  direction = 0;
2230  break;
2231  case 2:
2232  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2233  (pict->pict_type!= AV_PICTURE_TYPE_B))
2234  continue;
2235  direction = 1;
2236  break;
2237  }
2238  if (!USES_LIST(mbtype_table[mb_index], direction))
2239  continue;
2240 
2241  if (IS_8X8(mbtype_table[mb_index])) {
2242  int i;
2243  for (i = 0; i < 4; i++) {
2244  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2245  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2246  int xy = (mb_x * 2 + (i & 1) +
2247  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2248  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2249  int my = (motion_val[direction][xy][1] >> shift) + sy;
2250  draw_arrow(ptr, sx, sy, mx, my, width,
2251  height, pict->linesize[0], 100, 0, direction);
2252  }
2253  } else if (IS_16X8(mbtype_table[mb_index])) {
2254  int i;
2255  for (i = 0; i < 2; i++) {
2256  int sx = mb_x * 16 + 8;
2257  int sy = mb_y * 16 + 4 + 8 * i;
2258  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2259  int mx = (motion_val[direction][xy][0] >> shift);
2260  int my = (motion_val[direction][xy][1] >> shift);
2261 
2262  if (IS_INTERLACED(mbtype_table[mb_index]))
2263  my *= 2;
2264 
2265  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2266  height, pict->linesize[0], 100, 0, direction);
2267  }
2268  } else if (IS_8X16(mbtype_table[mb_index])) {
2269  int i;
2270  for (i = 0; i < 2; i++) {
2271  int sx = mb_x * 16 + 4 + 8 * i;
2272  int sy = mb_y * 16 + 8;
2273  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2274  int mx = motion_val[direction][xy][0] >> shift;
2275  int my = motion_val[direction][xy][1] >> shift;
2276 
2277  if (IS_INTERLACED(mbtype_table[mb_index]))
2278  my *= 2;
2279 
2280  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2281  height, pict->linesize[0], 100, 0, direction);
2282  }
2283  } else {
2284  int sx= mb_x * 16 + 8;
2285  int sy= mb_y * 16 + 8;
2286  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2287  int mx= (motion_val[direction][xy][0]>>shift) + sx;
2288  int my= (motion_val[direction][xy][1]>>shift) + sy;
2289  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2290  }
2291  }
2292  }
2293  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2294  uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2295  0x0101010101010101ULL;
2296  int y;
2297  for (y = 0; y < block_height; y++) {
2298  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2299  (block_height * mb_y + y) *
2300  pict->linesize[1]) = c;
2301  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2302  (block_height * mb_y + y) *
2303  pict->linesize[2]) = c;
2304  }
2305  }
2306  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2307  motion_val[0]) {
2308  int mb_type = mbtype_table[mb_index];
2309  uint64_t u,v;
2310  int y;
2311 #define COLOR(theta, r) \
2312  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2313  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2314 
2315 
2316  u = v = 128;
2317  if (IS_PCM(mb_type)) {
2318  COLOR(120, 48)
2319  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2320  IS_INTRA16x16(mb_type)) {
2321  COLOR(30, 48)
2322  } else if (IS_INTRA4x4(mb_type)) {
2323  COLOR(90, 48)
2324  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2325  // COLOR(120, 48)
2326  } else if (IS_DIRECT(mb_type)) {
2327  COLOR(150, 48)
2328  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2329  COLOR(170, 48)
2330  } else if (IS_GMC(mb_type)) {
2331  COLOR(190, 48)
2332  } else if (IS_SKIP(mb_type)) {
2333  // COLOR(180, 48)
2334  } else if (!USES_LIST(mb_type, 1)) {
2335  COLOR(240, 48)
2336  } else if (!USES_LIST(mb_type, 0)) {
2337  COLOR(0, 48)
2338  } else {
2339  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2340  COLOR(300,48)
2341  }
2342 
2343  u *= 0x0101010101010101ULL;
2344  v *= 0x0101010101010101ULL;
2345  for (y = 0; y < block_height; y++) {
2346  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2347  (block_height * mb_y + y) * pict->linesize[1]) = u;
2348  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2349  (block_height * mb_y + y) * pict->linesize[2]) = v;
2350  }
2351 
2352  // segmentation
2353  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2354  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2355  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2356  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2357  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2358  }
2359  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2360  for (y = 0; y < 16; y++)
2361  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2362  pict->linesize[0]] ^= 0x80;
2363  }
2364  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2365  int dm = 1 << (mv_sample_log2 - 2);
2366  for (i = 0; i < 4; i++) {
2367  int sx = mb_x * 16 + 8 * (i & 1);
2368  int sy = mb_y * 16 + 8 * (i >> 1);
2369  int xy = (mb_x * 2 + (i & 1) +
2370  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2371  // FIXME bidir
2372  int32_t *mv = (int32_t *) &motion_val[0][xy];
2373  if (mv[0] != mv[dm] ||
2374  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2375  for (y = 0; y < 8; y++)
2376  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2377  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2378  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2379  pict->linesize[0]) ^= 0x8080808080808080ULL;
2380  }
2381  }
2382 
2383  if (IS_INTERLACED(mb_type) &&
2384  avctx->codec->id == AV_CODEC_ID_H264) {
2385  // hmm
2386  }
2387  }
2388  mbskip_table[mb_index] = 0;
2389  }
2390  }
2391  }
2392 }
2393 
/* NOTE(review): the function signature line is missing from this extract; in
 * FFmpeg this is ff_print_debug_info(), a thin wrapper forwarding the
 * per-picture debug tables to ff_print_debug_info2() -- confirm against the
 * full source. */
{
    /* NOTE(review): the first line of the forwarded call (function name and
     * leading arguments) is missing from this extract. */
               p->qscale_table, p->motion_val, &s->low_delay,
               s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
2400 
/* NOTE(review): the signature line is missing from this extract; this
 * function exports picture p's per-MB qscale table into frame f as a QP
 * table -- confirm the exact name/signature against the full source. */
{
    /* NOTE(review): the line declaring/obtaining the buffer reference 'ref'
     * is missing from this extract. */
    int offset = 2*s->mb_stride + 1;
    if(!ref)
        return AVERROR(ENOMEM);
    /* the referenced table must cover all MB rows of the frame past offset */
    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
    /* skip the leading padding rows of the stride-padded table */
    ref->size -= offset;
    ref->data += offset;
    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
}
2412 
/* NOTE(review): the first signature line is missing from this extract; in
 * FFmpeg this is the half-pel lowres luma MC helper
 * (static int hpel_motion_lowres(MpegEncContext *s, ...)) -- confirm. */
                              uint8_t *dest, uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    /* mask selecting the sub-pel fraction of a vector at this lowres level */
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    /* qpel vectors are reduced to half-pel precision in lowres mode */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* parses as >> (lowres + 1): '+' binds tighter than '>>' */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* use the edge-emulation buffer when the reference area sticks out of
     * the picture */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* NOTE(review): the first line of the emulated_edge_mc() call
         * (function name + leading arguments) is missing from this extract. */
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the sub-pel fraction into the 1/8-pel units of the chroma MC */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
2458 
/* apply one mpeg motion vector to the three components */
/* NOTE(review): the first signature line is missing from this extract; in
 * FFmpeg this is
 * static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, ...)
 * -- confirm against the full source. */
                                uint8_t *dest_y,
                                uint8_t *dest_cb,
                                uint8_t *dest_cr,
                                int field_based,
                                int bottom_field,
                                int field_select,
                                uint8_t **ref_picture,
                                h264_chroma_mc_func *pix_op,
                                int motion_x, int motion_y,
                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;          /* block size at this lowres level */
    const int s_mask     = (2 << lowres) - 1;  /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        /* adjust for the vertical sub-pel offset between the two fields */
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* parses as >> (lowres + 1): '+' binds tighter than '>>' */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        /* H.263 chroma vectors keep the luma half-pel bit */
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 - halve the vector in both directions */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s              + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422: horizontal subsampling only
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
            //Chroma444: chroma uses the luma vector unchanged
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* fall back to the edge-emulation buffer when the reference area sticks
     * out of the picture */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y   += s->linesize;
        ptr_cb  += s->uvlinesize;
        ptr_cr  += s->uvlinesize;
    }

    /* rescale the sub-pel fraction into the 1/8-pel units of the chroma MC */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
2596 
/* NOTE(review): the first signature line is missing from this extract; in
 * FFmpeg this is
 * static inline void chroma_4mv_motion_lowres(MpegEncContext *s, -- confirm. */
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;        /* block size at this lowres level */
    const int s_mask     = (2 << lowres) - 1;  /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    /* qpel vectors are reduced to half-pel precision in lowres mode */
    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    /* parses as >> (lowres + 1): '+' binds tighter than '>>' */
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        /* NOTE(review): the first line of the emulated_edge_mc() call is
         * missing from this extract. */
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same coordinates, so it needs emulation iff Cb did */
    ptr = ref_picture[2] + offset;
    if (emu) {
        /* NOTE(review): the first line of the emulated_edge_mc() call is
         * missing from this extract. */
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
2653 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;   /* block size at this lowres level */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* a single vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma gets their accumulated/rounded average */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                                            s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* referencing the opposite field of the current frame */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                /* NOTE(review): a line switching pix_op to the averaging
                 * variant appears to be missing here in this extract. */
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                /* NOTE(review): a line switching pix_op to the averaging
                 * variant appears to be missing here in this extract. */

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
2795 
/**
 * find the lowest MB row referenced in the MVs
 */
/* NOTE(review): the signature line is missing from this extract; in FFmpeg
 * this is static int lowest_referenced_row(MpegEncContext *s, int dir). */
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* field pictures and GMC do not fit the simple row model below */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* widest vertical excursion among this MB's vectors, in qpel units */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* round the qpel excursion up to whole MB rows (64 qpel = 16 pel) */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
unhandled:
    /* conservative fallback: wait for the whole reference frame */
    return s->mb_height-1;
}
2833 
2834 /* put block[] to dest[] */
2835 static inline void put_dct(MpegEncContext *s,
2836  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2837 {
2838  s->dct_unquantize_intra(s, block, i, qscale);
2839  s->idsp.idct_put(dest, line_size, block);
2840 }
2841 
2842 /* add block[] to dest[] */
2843 static inline void add_dct(MpegEncContext *s,
2844  int16_t *block, int i, uint8_t *dest, int line_size)
2845 {
2846  if (s->block_last_index[i] >= 0) {
2847  s->idsp.idct_add(dest, line_size, block);
2848  }
2849 }
2850 
2851 static inline void add_dequant_dct(MpegEncContext *s,
2852  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2853 {
2854  if (s->block_last_index[i] >= 0) {
2855  s->dct_unquantize_inter(s, block, i, qscale);
2856 
2857  s->idsp.idct_add(dest, line_size, block);
2858  }
2859 }
2860 
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
/* NOTE(review): the signature line is missing from this extract; in FFmpeg
 * this is void ff_clean_intra_table_entries(MpegEncContext *s). */
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* reset the four luma DC predictors of this MB to the intra default */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap    ] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy + wrap    ] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    /* mark the MB as non-intra for the prediction of its neighbours */
    s->mbintra_table[xy]= 0;
}
2893 
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
/* NOTE(review): the first signature line is missing from this extract; in
 * FFmpeg it declares MPV_decode_mb_internal(MpegEncContext *s,
 * int16_t block[12][64], ...) -- confirm against the full source. */
                                int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* hardware acceleration path: hand the MB off and do nothing locally */
    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                      block[i][s->idsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                /* NOTE(review): the call resetting the intra tables
                 * (ff_clean_intra_table_entries) is missing from this
                 * extract. */
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if (   (s->flags&CODEC_FLAG_PSNR)
        /* NOTE(review): one condition line of this if is missing from this
         * extract. */
        || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                /* NOTE(review): one line appears to be missing here in this
                 * extract. */
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* write into scratch, copied out after skip_idct below */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        /* NOTE(review): the first lines of the
                         * ff_thread_await_progress() call are missing from
                         * this extract. */
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        /* NOTE(review): the first lines of the
                         * ff_thread_await_progress() call are missing from
                         * this extract. */
                                                 0);
                    }
                }

                if(lowres_flag){
                    /* NOTE(review): the declaration of the lowres chroma MC
                     * op table (put variant) is missing from this extract. */

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        /* NOTE(review): the line switching op_pix to the
                         * averaging variant is missing from this extract. */
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                }else{
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->hdsp.put_pixels_tab;
                    }else{
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        /* backward prediction averages on top of forward */
                        op_pix = s->hdsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                /* NOTE(review): the first condition lines of this if are
                 * missing from this extract. */
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            /* NOTE(review): the first condition line of this if is missing
             * from this extract. */
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            /* NOTE(review): the condition line of this if is missing from
             * this extract. */
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        if(!readable){
            /* reconstruction went to scratch; copy it to the real picture */
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
3150 
3151 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3152 #if !CONFIG_SMALL
3153  if(s->out_format == FMT_MPEG1) {
3154  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3155  else MPV_decode_mb_internal(s, block, 0, 1);
3156  } else
3157 #endif
3158  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3159  else MPV_decode_mb_internal(s, block, 0, 0);
3160 }
3161 
/* NOTE(review): the signature line is missing from this extract; in FFmpeg
 * this is ff_mpeg_draw_horiz_band(), a wrapper around ff_draw_horiz_band()
 * -- confirm against the full source. */
{
    /* NOTE(review): the first line of the forwarded call (function name and
     * leading arguments) is missing from this extract. */
                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
3168 
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    /* the picture's own linesizes; s->linesize would be wrong for field pics */
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    const int mb_size= 4 - s->avctx->lowres;   /* log2 of the MB size at this lowres level */

    /* luma 8x8 block indices, positioned one MB left of the current one */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma block indices, stored after the luma tables */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* NOTE(review): the condition line of this if statement is missing from
     * this extract. */
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: each field advances by half the MB rows */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            /* NOTE(review): one line (an assertion on parity vs. picture
             * structure, presumably) appears missing here in this extract. */
        }
    }
}
3200 
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 * (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int n;

    /* nothing (or only the DC term at position 0) to move around */
    if(last<=0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* lift every coded coefficient out of the block, clearing its slot so
     * positions not hit by the permutation end up zero */
    for (n = last; n >= 0; n--) {
        const int pos = scantable[n];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* scatter the saved coefficients back through the permutation vector */
    for (n = last; n >= 0; n--) {
        const int pos = scantable[n];
        block[permutation[pos]] = saved[pos];
    }
}
3229 
/* NOTE(review): the signature line is missing from this extract; in FFmpeg
 * this is ff_mpeg_flush(AVCodecContext *avctx): drops all buffered pictures
 * and resets the parser state (used e.g. when seeking). */
    int i;
    MpegEncContext *s = avctx->priv_data;

    /* nothing to do if the context was never fully initialized */
    if(s==NULL || s->picture==NULL)
        return;

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s, &s->picture[i]);

    /* NOTE(review): several lines (presumably unreferencing the
     * current/last/next picture) appear to be missing here in this extract. */

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    s->parse_context.state= -1;
    /* NOTE(review): a parse_context reset line appears missing here. */
    s->parse_context.overread= 0;
    /* NOTE(review): a parse_context reset line appears missing here. */
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
3257 
/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    /* clamp to the legal 1..31 qscale range */
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale= s->chroma_qscale_table[qscale];

    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
    /* NOTE(review): the matching chroma DC scale update line appears to be
     * missing from this extract. */
}
3274 
3276 {
3279 }