FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "h264chroma.h"
38 #include "idctdsp.h"
39 #include "internal.h"
40 #include "mathops.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "qpeldsp.h"
46 #include "thread.h"
47 #include <limits.h>
48 
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
53 };
54 
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 };
66 
/* MPEG-2 intra-DC scale factors for intra_dc_precision == 1:
 * the DC coefficient is scaled by a constant 4 for every qscale value
 * (the table is indexed by qscale, 0..127). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
78 
/* MPEG-2 intra-DC scale factors for intra_dc_precision == 2:
 * the DC coefficient is scaled by a constant 2 for every qscale value
 * (the table is indexed by qscale, 0..127). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
90 
/* MPEG-2 intra-DC scale factors for intra_dc_precision == 3:
 * the DC coefficient is scaled by a constant 1 (i.e. passed through)
 * for every qscale value (the table is indexed by qscale, 0..127). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
102 
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
108 };
109 
111  0, 1, 2, 3, 8, 9, 16, 17,
112  10, 11, 4, 5, 6, 7, 15, 14,
113  13, 12, 19, 18, 24, 25, 32, 33,
114  26, 27, 20, 21, 22, 23, 28, 29,
115  30, 31, 34, 35, 40, 41, 48, 49,
116  42, 43, 36, 37, 38, 39, 44, 45,
117  46, 47, 50, 51, 56, 57, 58, 59,
118  52, 53, 54, 55, 60, 61, 62, 63,
119 };
120 
122  0, 8, 16, 24, 1, 9, 2, 10,
123  17, 25, 32, 40, 48, 56, 57, 49,
124  41, 33, 26, 18, 3, 11, 4, 12,
125  19, 27, 34, 42, 50, 58, 35, 43,
126  51, 59, 20, 28, 5, 13, 6, 14,
127  21, 29, 36, 44, 52, 60, 37, 45,
128  53, 61, 22, 30, 7, 15, 23, 31,
129  38, 46, 54, 62, 39, 47, 55, 63,
130 };
131 
133  int16_t *block, int n, int qscale)
134 {
135  int i, level, nCoeffs;
136  const uint16_t *quant_matrix;
137 
138  nCoeffs= s->block_last_index[n];
139 
140  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141  /* XXX: only mpeg1 */
142  quant_matrix = s->intra_matrix;
143  for(i=1;i<=nCoeffs;i++) {
144  int j= s->intra_scantable.permutated[i];
145  level = block[j];
146  if (level) {
147  if (level < 0) {
148  level = -level;
149  level = (int)(level * qscale * quant_matrix[j]) >> 3;
150  level = (level - 1) | 1;
151  level = -level;
152  } else {
153  level = (int)(level * qscale * quant_matrix[j]) >> 3;
154  level = (level - 1) | 1;
155  }
156  block[j] = level;
157  }
158  }
159 }
160 
162  int16_t *block, int n, int qscale)
163 {
164  int i, level, nCoeffs;
165  const uint16_t *quant_matrix;
166 
167  nCoeffs= s->block_last_index[n];
168 
169  quant_matrix = s->inter_matrix;
170  for(i=0; i<=nCoeffs; i++) {
171  int j= s->intra_scantable.permutated[i];
172  level = block[j];
173  if (level) {
174  if (level < 0) {
175  level = -level;
176  level = (((level << 1) + 1) * qscale *
177  ((int) (quant_matrix[j]))) >> 4;
178  level = (level - 1) | 1;
179  level = -level;
180  } else {
181  level = (((level << 1) + 1) * qscale *
182  ((int) (quant_matrix[j]))) >> 4;
183  level = (level - 1) | 1;
184  }
185  block[j] = level;
186  }
187  }
188 }
189 
191  int16_t *block, int n, int qscale)
192 {
193  int i, level, nCoeffs;
194  const uint16_t *quant_matrix;
195 
196  if(s->alternate_scan) nCoeffs= 63;
197  else nCoeffs= s->block_last_index[n];
198 
199  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200  quant_matrix = s->intra_matrix;
201  for(i=1;i<=nCoeffs;i++) {
202  int j= s->intra_scantable.permutated[i];
203  level = block[j];
204  if (level) {
205  if (level < 0) {
206  level = -level;
207  level = (int)(level * qscale * quant_matrix[j]) >> 3;
208  level = -level;
209  } else {
210  level = (int)(level * qscale * quant_matrix[j]) >> 3;
211  }
212  block[j] = level;
213  }
214  }
215 }
216 
218  int16_t *block, int n, int qscale)
219 {
220  int i, level, nCoeffs;
221  const uint16_t *quant_matrix;
222  int sum=-1;
223 
224  if(s->alternate_scan) nCoeffs= 63;
225  else nCoeffs= s->block_last_index[n];
226 
227  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
228  sum += block[0];
229  quant_matrix = s->intra_matrix;
230  for(i=1;i<=nCoeffs;i++) {
231  int j= s->intra_scantable.permutated[i];
232  level = block[j];
233  if (level) {
234  if (level < 0) {
235  level = -level;
236  level = (int)(level * qscale * quant_matrix[j]) >> 3;
237  level = -level;
238  } else {
239  level = (int)(level * qscale * quant_matrix[j]) >> 3;
240  }
241  block[j] = level;
242  sum+=level;
243  }
244  }
245  block[63]^=sum&1;
246 }
247 
249  int16_t *block, int n, int qscale)
250 {
251  int i, level, nCoeffs;
252  const uint16_t *quant_matrix;
253  int sum=-1;
254 
255  if(s->alternate_scan) nCoeffs= 63;
256  else nCoeffs= s->block_last_index[n];
257 
258  quant_matrix = s->inter_matrix;
259  for(i=0; i<=nCoeffs; i++) {
260  int j= s->intra_scantable.permutated[i];
261  level = block[j];
262  if (level) {
263  if (level < 0) {
264  level = -level;
265  level = (((level << 1) + 1) * qscale *
266  ((int) (quant_matrix[j]))) >> 4;
267  level = -level;
268  } else {
269  level = (((level << 1) + 1) * qscale *
270  ((int) (quant_matrix[j]))) >> 4;
271  }
272  block[j] = level;
273  sum+=level;
274  }
275  }
276  block[63]^=sum&1;
277 }
278 
280  int16_t *block, int n, int qscale)
281 {
282  int i, level, qmul, qadd;
283  int nCoeffs;
284 
285  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
286 
287  qmul = qscale << 1;
288 
289  if (!s->h263_aic) {
290  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
291  qadd = (qscale - 1) | 1;
292  }else{
293  qadd = 0;
294  }
295  if(s->ac_pred)
296  nCoeffs=63;
297  else
298  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
299 
300  for(i=1; i<=nCoeffs; i++) {
301  level = block[i];
302  if (level) {
303  if (level < 0) {
304  level = level * qmul - qadd;
305  } else {
306  level = level * qmul + qadd;
307  }
308  block[i] = level;
309  }
310  }
311 }
312 
314  int16_t *block, int n, int qscale)
315 {
316  int i, level, qmul, qadd;
317  int nCoeffs;
318 
319  av_assert2(s->block_last_index[n]>=0);
320 
321  qadd = (qscale - 1) | 1;
322  qmul = qscale << 1;
323 
324  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
325 
326  for(i=0; i<=nCoeffs; i++) {
327  level = block[i];
328  if (level) {
329  if (level < 0) {
330  level = level * qmul - qadd;
331  } else {
332  level = level * qmul + qadd;
333  }
334  block[i] = level;
335  }
336  }
337 }
338 
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
340  int (*mv)[2][4][2],
341  int mb_x, int mb_y, int mb_intra, int mb_skipped)
342 {
343  MpegEncContext *s = opaque;
344 
345  s->mv_dir = mv_dir;
346  s->mv_type = mv_type;
347  s->mb_intra = mb_intra;
348  s->mb_skipped = mb_skipped;
349  s->mb_x = mb_x;
350  s->mb_y = mb_y;
351  memcpy(s->mv, mv, sizeof(*mv));
352 
355 
356  s->bdsp.clear_blocks(s->block[0]);
357 
358  s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359  s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360  s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 
362  if (ref)
363  av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364  ff_MPV_decode_mb(s, s->block);
365 }
366 
/* Debug substitute for the 16-pixel-wide put/avg hpel functions
 * (installed under FF_DEBUG_NOMC): ignores the source block and
 * paints the first 16 bytes of each of the h destination rows
 * mid-gray (value 128). */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
372 
/* Debug substitute for the 8-pixel-wide put/avg hpel functions
 * (installed under FF_DEBUG_NOMC): ignores the source block and
 * paints the first 8 bytes of each of the h destination rows
 * mid-gray (value 128). */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
378 
379 /* init common dct for both encoder and decoder */
381 {
382  ff_blockdsp_init(&s->bdsp, s->avctx);
383  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
384  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
385  ff_idctdsp_init(&s->idsp, s->avctx);
386  ff_me_cmp_init(&s->mecc, s->avctx);
389 
390  if (s->avctx->debug & FF_DEBUG_NOMC) {
391  int i;
392  for (i=0; i<4; i++) {
393  s->hdsp.avg_pixels_tab[0][i] = gray16;
394  s->hdsp.put_pixels_tab[0][i] = gray16;
395  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
396 
397  s->hdsp.avg_pixels_tab[1][i] = gray8;
398  s->hdsp.put_pixels_tab[1][i] = gray8;
399  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
400  }
401  }
402 
408  if (s->flags & CODEC_FLAG_BITEXACT)
411 
412  if (HAVE_INTRINSICS_NEON)
414 
415  if (ARCH_ALPHA)
417  if (ARCH_ARM)
419  if (ARCH_PPC)
421  if (ARCH_X86)
423 
424  /* load & permutate scantables
425  * note: only wmv uses different ones
426  */
427  if (s->alternate_scan) {
430  } else {
433  }
436 
437  return 0;
438 }
439 
440 static int frame_size_alloc(MpegEncContext *s, int linesize)
441 {
442  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
443 
445  return 0;
446 
447  if (linesize < 24) {
448  av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
449  return AVERROR_PATCHWELCOME;
450  }
451 
452  // edge emu needs blocksize + filter length - 1
453  // (= 17x17 for halfpel / 21x21 for h264)
454  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
455  // at uvlinesize. It supports only YUV420 so 24x24 is enough
456  // linesize * interlaced * MBsize
 457  // we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines
458  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
459  fail);
460 
461  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
462  fail)
463  s->me.temp = s->me.scratchpad;
464  s->rd_scratchpad = s->me.scratchpad;
465  s->b_scratchpad = s->me.scratchpad;
466  s->obmc_scratchpad = s->me.scratchpad + 16;
467 
468  return 0;
469 fail:
471  return AVERROR(ENOMEM);
472 }
473 
474 /**
475  * Allocate a frame buffer
476  */
478 {
479  int edges_needed = av_codec_is_encoder(s->avctx->codec);
480  int r, ret;
481 
482  pic->tf.f = pic->f;
483  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
485  s->codec_id != AV_CODEC_ID_MSS2) {
486  if (edges_needed) {
487  pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
488  pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
489  }
490 
491  r = ff_thread_get_buffer(s->avctx, &pic->tf,
492  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
493  } else {
494  pic->f->width = s->avctx->width;
495  pic->f->height = s->avctx->height;
496  pic->f->format = s->avctx->pix_fmt;
497  r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
498  }
499 
500  if (r < 0 || !pic->f->buf[0]) {
501  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
502  r, pic->f->data[0]);
503  return -1;
504  }
505 
506  if (edges_needed) {
507  int i;
508  for (i = 0; pic->f->data[i]; i++) {
509  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
510  pic->f->linesize[i] +
511  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
512  pic->f->data[i] += offset;
513  }
514  pic->f->width = s->avctx->width;
515  pic->f->height = s->avctx->height;
516  }
517 
518  if (s->avctx->hwaccel) {
519  assert(!pic->hwaccel_picture_private);
522  if (!pic->hwaccel_priv_buf) {
523  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
524  return -1;
525  }
527  }
528  }
529 
530  if (s->linesize && (s->linesize != pic->f->linesize[0] ||
531  s->uvlinesize != pic->f->linesize[1])) {
533  "get_buffer() failed (stride changed)\n");
534  ff_mpeg_unref_picture(s, pic);
535  return -1;
536  }
537 
538  if (pic->f->linesize[1] != pic->f->linesize[2]) {
540  "get_buffer() failed (uv stride mismatch)\n");
541  ff_mpeg_unref_picture(s, pic);
542  return -1;
543  }
544 
545  if (!s->edge_emu_buffer &&
546  (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
548  "get_buffer() failed to allocate context scratch buffers.\n");
549  ff_mpeg_unref_picture(s, pic);
550  return ret;
551  }
552 
553  return 0;
554 }
555 
557 {
558  int i;
559 
560  pic->alloc_mb_width =
561  pic->alloc_mb_height = 0;
562 
569 
570  for (i = 0; i < 2; i++) {
572  av_buffer_unref(&pic->ref_index_buf[i]);
573  }
574 }
575 
577 {
578  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
579  const int mb_array_size = s->mb_stride * s->mb_height;
580  const int b8_array_size = s->b8_stride * s->mb_height * 2;
581  int i;
582 
583 
584  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
585  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
586  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
587  sizeof(uint32_t));
588  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
589  return AVERROR(ENOMEM);
590 
591  if (s->encoding) {
592  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
593  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
594  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
595  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
596  return AVERROR(ENOMEM);
597  }
598 
599  if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
600  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
601  int ref_index_size = 4 * mb_array_size;
602 
603  for (i = 0; mv_size && i < 2; i++) {
604  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
605  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
606  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
607  return AVERROR(ENOMEM);
608  }
609  }
610 
611  pic->alloc_mb_width = s->mb_width;
612  pic->alloc_mb_height = s->mb_height;
613 
614  return 0;
615 }
616 
618 {
619  int ret, i;
620 #define MAKE_WRITABLE(table) \
621 do {\
622  if (pic->table &&\
623  (ret = av_buffer_make_writable(&pic->table)) < 0)\
624  return ret;\
625 } while (0)
626 
627  MAKE_WRITABLE(mb_var_buf);
628  MAKE_WRITABLE(mc_mb_var_buf);
629  MAKE_WRITABLE(mb_mean_buf);
630  MAKE_WRITABLE(mbskip_table_buf);
631  MAKE_WRITABLE(qscale_table_buf);
632  MAKE_WRITABLE(mb_type_buf);
633 
634  for (i = 0; i < 2; i++) {
635  MAKE_WRITABLE(motion_val_buf[i]);
636  MAKE_WRITABLE(ref_index_buf[i]);
637  }
638 
639  return 0;
640 }
641 
642 /**
643  * Allocate a Picture.
644  * The pixels are allocated/set by calling get_buffer() if shared = 0
645  */
646 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
647 {
648  int i, ret;
649 
650  if (pic->qscale_table_buf)
651  if ( pic->alloc_mb_width != s->mb_width
652  || pic->alloc_mb_height != s->mb_height)
654 
655  if (shared) {
656  av_assert0(pic->f->data[0]);
657  pic->shared = 1;
658  } else {
659  av_assert0(!pic->f->buf[0]);
660 
661  if (alloc_frame_buffer(s, pic) < 0)
662  return -1;
663 
664  s->linesize = pic->f->linesize[0];
665  s->uvlinesize = pic->f->linesize[1];
666  }
667 
668  if (!pic->qscale_table_buf)
669  ret = alloc_picture_tables(s, pic);
670  else
671  ret = make_tables_writable(pic);
672  if (ret < 0)
673  goto fail;
674 
675  if (s->encoding) {
676  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
677  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
678  pic->mb_mean = pic->mb_mean_buf->data;
679  }
680 
681  pic->mbskip_table = pic->mbskip_table_buf->data;
682  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
683  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
684 
685  if (pic->motion_val_buf[0]) {
686  for (i = 0; i < 2; i++) {
687  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
688  pic->ref_index[i] = pic->ref_index_buf[i]->data;
689  }
690  }
691 
692  return 0;
693 fail:
694  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
695  ff_mpeg_unref_picture(s, pic);
697  return AVERROR(ENOMEM);
698 }
699 
700 /**
701  * Deallocate a picture.
702  */
704 {
705  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
706 
707  pic->tf.f = pic->f;
708  /* WM Image / Screen codecs allocate internal buffers with different
709  * dimensions / colorspaces; ignore user-defined callbacks for these. */
710  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
713  ff_thread_release_buffer(s->avctx, &pic->tf);
714  else if (pic->f)
715  av_frame_unref(pic->f);
716 
718 
719  if (pic->needs_realloc)
721 
722  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
723 }
724 
726 {
727  int i;
728 
729 #define UPDATE_TABLE(table)\
730 do {\
731  if (src->table &&\
732  (!dst->table || dst->table->buffer != src->table->buffer)) {\
733  av_buffer_unref(&dst->table);\
734  dst->table = av_buffer_ref(src->table);\
735  if (!dst->table) {\
736  ff_free_picture_tables(dst);\
737  return AVERROR(ENOMEM);\
738  }\
739  }\
740 } while (0)
741 
742  UPDATE_TABLE(mb_var_buf);
743  UPDATE_TABLE(mc_mb_var_buf);
744  UPDATE_TABLE(mb_mean_buf);
745  UPDATE_TABLE(mbskip_table_buf);
746  UPDATE_TABLE(qscale_table_buf);
747  UPDATE_TABLE(mb_type_buf);
748  for (i = 0; i < 2; i++) {
749  UPDATE_TABLE(motion_val_buf[i]);
750  UPDATE_TABLE(ref_index_buf[i]);
751  }
752 
753  dst->mb_var = src->mb_var;
754  dst->mc_mb_var = src->mc_mb_var;
755  dst->mb_mean = src->mb_mean;
756  dst->mbskip_table = src->mbskip_table;
757  dst->qscale_table = src->qscale_table;
758  dst->mb_type = src->mb_type;
759  for (i = 0; i < 2; i++) {
760  dst->motion_val[i] = src->motion_val[i];
761  dst->ref_index[i] = src->ref_index[i];
762  }
763 
764  dst->alloc_mb_width = src->alloc_mb_width;
765  dst->alloc_mb_height = src->alloc_mb_height;
766 
767  return 0;
768 }
769 
771 {
772  int ret;
773 
774  av_assert0(!dst->f->buf[0]);
775  av_assert0(src->f->buf[0]);
776 
777  src->tf.f = src->f;
778  dst->tf.f = dst->f;
779  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
780  if (ret < 0)
781  goto fail;
782 
783  ret = update_picture_tables(dst, src);
784  if (ret < 0)
785  goto fail;
786 
787  if (src->hwaccel_picture_private) {
789  if (!dst->hwaccel_priv_buf)
790  goto fail;
792  }
793 
794  dst->field_picture = src->field_picture;
795  dst->mb_var_sum = src->mb_var_sum;
796  dst->mc_mb_var_sum = src->mc_mb_var_sum;
797  dst->b_frame_score = src->b_frame_score;
798  dst->needs_realloc = src->needs_realloc;
799  dst->reference = src->reference;
800  dst->shared = src->shared;
801 
802  return 0;
803 fail:
804  ff_mpeg_unref_picture(s, dst);
805  return ret;
806 }
807 
809 {
810  int16_t (*tmp)[64];
811 
812  tmp = s->pblocks[4];
813  s->pblocks[4] = s->pblocks[5];
814  s->pblocks[5] = tmp;
815 }
816 
818 {
819  int y_size = s->b8_stride * (2 * s->mb_height + 1);
820  int c_size = s->mb_stride * (s->mb_height + 1);
821  int yc_size = y_size + 2 * c_size;
822  int i;
823 
824  if (s->mb_height & 1)
825  yc_size += 2*s->b8_stride + 2*s->mb_stride;
826 
827  s->edge_emu_buffer =
828  s->me.scratchpad =
829  s->me.temp =
830  s->rd_scratchpad =
831  s->b_scratchpad =
832  s->obmc_scratchpad = NULL;
833 
834  if (s->encoding) {
835  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
836  ME_MAP_SIZE * sizeof(uint32_t), fail)
838  ME_MAP_SIZE * sizeof(uint32_t), fail)
839  if (s->avctx->noise_reduction) {
841  2 * 64 * sizeof(int), fail)
842  }
843  }
844  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
845  s->block = s->blocks[0];
846 
847  for (i = 0; i < 12; i++) {
848  s->pblocks[i] = &s->block[i];
849  }
850  if (s->avctx->codec_tag == AV_RL32("VCR2"))
851  exchange_uv(s);
852 
853  if (s->out_format == FMT_H263) {
854  /* ac values */
856  yc_size * sizeof(int16_t) * 16, fail);
857  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
858  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
859  s->ac_val[2] = s->ac_val[1] + c_size;
860  }
861 
862  return 0;
863 fail:
864  return -1; // free() through ff_MPV_common_end()
865 }
866 
868 {
869  if (s == NULL)
870  return;
871 
873  av_freep(&s->me.scratchpad);
874  s->me.temp =
875  s->rd_scratchpad =
876  s->b_scratchpad =
877  s->obmc_scratchpad = NULL;
878 
879  av_freep(&s->dct_error_sum);
880  av_freep(&s->me.map);
881  av_freep(&s->me.score_map);
882  av_freep(&s->blocks);
883  av_freep(&s->ac_val_base);
884  s->block = NULL;
885 }
886 
888 {
889 #define COPY(a) bak->a = src->a
890  COPY(edge_emu_buffer);
891  COPY(me.scratchpad);
892  COPY(me.temp);
893  COPY(rd_scratchpad);
894  COPY(b_scratchpad);
895  COPY(obmc_scratchpad);
896  COPY(me.map);
897  COPY(me.score_map);
898  COPY(blocks);
899  COPY(block);
900  COPY(start_mb_y);
901  COPY(end_mb_y);
902  COPY(me.map_generation);
903  COPY(pb);
904  COPY(dct_error_sum);
905  COPY(dct_count[0]);
906  COPY(dct_count[1]);
907  COPY(ac_val_base);
908  COPY(ac_val[0]);
909  COPY(ac_val[1]);
910  COPY(ac_val[2]);
911 #undef COPY
912 }
913 
915 {
916  MpegEncContext bak;
917  int i, ret;
918  // FIXME copy only needed parts
919  // START_TIMER
920  backup_duplicate_context(&bak, dst);
921  memcpy(dst, src, sizeof(MpegEncContext));
922  backup_duplicate_context(dst, &bak);
923  for (i = 0; i < 12; i++) {
924  dst->pblocks[i] = &dst->block[i];
925  }
926  if (dst->avctx->codec_tag == AV_RL32("VCR2"))
927  exchange_uv(dst);
928  if (!dst->edge_emu_buffer &&
929  (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
930  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
931  "scratch buffers.\n");
932  return ret;
933  }
934  // STOP_TIMER("update_duplicate_context")
935  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
936  return 0;
937 }
938 
940  const AVCodecContext *src)
941 {
942  int i, ret;
943  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
944 
945  if (dst == src)
946  return 0;
947 
948  av_assert0(s != s1);
949 
950  // FIXME can parameters change on I-frames?
951  // in that case dst may need a reinit
952  if (!s->context_initialized) {
953  memcpy(s, s1, sizeof(MpegEncContext));
954 
955  s->avctx = dst;
956  s->bitstream_buffer = NULL;
958 
959  if (s1->context_initialized){
960 // s->picture_range_start += MAX_PICTURE_COUNT;
961 // s->picture_range_end += MAX_PICTURE_COUNT;
962  if((ret = ff_MPV_common_init(s)) < 0){
963  memset(s, 0, sizeof(MpegEncContext));
964  s->avctx = dst;
965  return ret;
966  }
967  }
968  }
969 
970  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
971  s->context_reinit = 0;
972  s->height = s1->height;
973  s->width = s1->width;
974  if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
975  return ret;
976  }
977 
978  s->avctx->coded_height = s1->avctx->coded_height;
979  s->avctx->coded_width = s1->avctx->coded_width;
980  s->avctx->width = s1->avctx->width;
981  s->avctx->height = s1->avctx->height;
982 
983  s->coded_picture_number = s1->coded_picture_number;
984  s->picture_number = s1->picture_number;
985 
986  av_assert0(!s->picture || s->picture != s1->picture);
987  if(s->picture)
988  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
989  ff_mpeg_unref_picture(s, &s->picture[i]);
990  if (s1->picture[i].f->buf[0] &&
991  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
992  return ret;
993  }
994 
995 #define UPDATE_PICTURE(pic)\
996 do {\
997  ff_mpeg_unref_picture(s, &s->pic);\
998  if (s1->pic.f && s1->pic.f->buf[0])\
999  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1000  else\
1001  ret = update_picture_tables(&s->pic, &s1->pic);\
1002  if (ret < 0)\
1003  return ret;\
1004 } while (0)
1005 
1006  UPDATE_PICTURE(current_picture);
1007  UPDATE_PICTURE(last_picture);
1008  UPDATE_PICTURE(next_picture);
1009 
1010  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1011  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1012  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1013 
1014  // Error/bug resilience
1015  s->next_p_frame_damaged = s1->next_p_frame_damaged;
1016  s->workaround_bugs = s1->workaround_bugs;
1017  s->padding_bug_score = s1->padding_bug_score;
1018 
1019  // MPEG4 timing info
1020  memcpy(&s->last_time_base, &s1->last_time_base,
1021  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1022  (char *) &s1->last_time_base);
1023 
1024  // B-frame info
1025  s->max_b_frames = s1->max_b_frames;
1026  s->low_delay = s1->low_delay;
1027  s->droppable = s1->droppable;
1028 
1029  // DivX handling (doesn't work)
1030  s->divx_packed = s1->divx_packed;
1031 
1032  if (s1->bitstream_buffer) {
1033  if (s1->bitstream_buffer_size +
1037  s1->allocated_bitstream_buffer_size);
1038  s->bitstream_buffer_size = s1->bitstream_buffer_size;
1039  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1040  s1->bitstream_buffer_size);
1041  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1043  }
1044 
 1045  // linesize-dependent scratch buffer allocation
1046  if (!s->edge_emu_buffer)
1047  if (s1->linesize) {
1048  if (frame_size_alloc(s, s1->linesize) < 0) {
1049  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1050  "scratch buffers.\n");
1051  return AVERROR(ENOMEM);
1052  }
1053  } else {
1054  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1055  "be allocated due to unknown size.\n");
1056  }
1057 
1058  // MPEG2/interlacing info
1059  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1060  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1061 
1062  if (!s1->first_field) {
1063  s->last_pict_type = s1->pict_type;
1064  if (s1->current_picture_ptr)
1065  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1066  }
1067 
1068  return 0;
1069 }
1070 
1071 /**
1072  * Set the given MpegEncContext to common defaults
1073  * (same for encoding and decoding).
1074  * The changed fields will not depend upon the
1075  * prior state of the MpegEncContext.
1076  */
1078 {
1079  s->y_dc_scale_table =
1082  s->progressive_frame = 1;
1083  s->progressive_sequence = 1;
1085 
1086  s->coded_picture_number = 0;
1087  s->picture_number = 0;
1088 
1089  s->f_code = 1;
1090  s->b_code = 1;
1091 
1092  s->slice_context_count = 1;
1093 }
1094 
1095 /**
1096  * Set the given MpegEncContext to defaults for decoding.
1097  * the changed fields will not depend upon
1098  * the prior state of the MpegEncContext.
1099  */
1101 {
1103 }
1104 
1106 {
1107  ERContext *er = &s->er;
1108  int mb_array_size = s->mb_height * s->mb_stride;
1109  int i;
1110 
1111  er->avctx = s->avctx;
1112  er->mecc = &s->mecc;
1113 
1114  er->mb_index2xy = s->mb_index2xy;
1115  er->mb_num = s->mb_num;
1116  er->mb_width = s->mb_width;
1117  er->mb_height = s->mb_height;
1118  er->mb_stride = s->mb_stride;
1119  er->b8_stride = s->b8_stride;
1120 
1122  er->error_status_table = av_mallocz(mb_array_size);
1123  if (!er->er_temp_buffer || !er->error_status_table)
1124  goto fail;
1125 
1126  er->mbskip_table = s->mbskip_table;
1127  er->mbintra_table = s->mbintra_table;
1128 
1129  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1130  er->dc_val[i] = s->dc_val[i];
1131 
1133  er->opaque = s;
1134 
1135  return 0;
1136 fail:
1137  av_freep(&er->er_temp_buffer);
1139  return AVERROR(ENOMEM);
1140 }
1141 
1142 /**
1143  * Initialize and allocates MpegEncContext fields dependent on the resolution.
1144  */
1146 {
1147  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1148 
1149  s->mb_width = (s->width + 15) / 16;
1150  s->mb_stride = s->mb_width + 1;
1151  s->b8_stride = s->mb_width * 2 + 1;
1152  mb_array_size = s->mb_height * s->mb_stride;
1153  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1154 
1155  /* set default edge pos, will be overridden
1156  * in decode_header if needed */
1157  s->h_edge_pos = s->mb_width * 16;
1158  s->v_edge_pos = s->mb_height * 16;
1159 
1160  s->mb_num = s->mb_width * s->mb_height;
1161 
1162  s->block_wrap[0] =
1163  s->block_wrap[1] =
1164  s->block_wrap[2] =
1165  s->block_wrap[3] = s->b8_stride;
1166  s->block_wrap[4] =
1167  s->block_wrap[5] = s->mb_stride;
1168 
1169  y_size = s->b8_stride * (2 * s->mb_height + 1);
1170  c_size = s->mb_stride * (s->mb_height + 1);
1171  yc_size = y_size + 2 * c_size;
1172 
1173  if (s->mb_height & 1)
1174  yc_size += 2*s->b8_stride + 2*s->mb_stride;
1175 
 1176  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1177  for (y = 0; y < s->mb_height; y++)
1178  for (x = 0; x < s->mb_width; x++)
1179  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1180 
1181  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1182 
1183  if (s->encoding) {
1184  /* Allocate MV tables */
1185  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1186  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1187  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1188  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1189  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1190  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1191  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1197 
1198  /* Allocate MB type table */
1199  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1200 
1201  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1202 
1204  mb_array_size * sizeof(float), fail);
1206  mb_array_size * sizeof(float), fail);
1207 
1208  }
1209 
1210  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1212  /* interlaced direct mode decoding tables */
1213  for (i = 0; i < 2; i++) {
1214  int j, k;
1215  for (j = 0; j < 2; j++) {
1216  for (k = 0; k < 2; k++) {
1218  s->b_field_mv_table_base[i][j][k],
1219  mv_table_size * 2 * sizeof(int16_t),
1220  fail);
1221  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1222  s->mb_stride + 1;
1223  }
1224  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1225  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1226  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1227  }
1228  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1229  }
1230  }
1231  if (s->out_format == FMT_H263) {
1232  /* cbp values */
1233  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1234  s->coded_block = s->coded_block_base + s->b8_stride + 1;
1235 
1236  /* cbp, ac_pred, pred_dir */
1237  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1238  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1239  }
1240 
1241  if (s->h263_pred || s->h263_plus || !s->encoding) {
1242  /* dc values */
1243  // MN: we need these for error resilience of intra-frames
1244  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1245  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1246  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1247  s->dc_val[2] = s->dc_val[1] + c_size;
1248  for (i = 0; i < yc_size; i++)
1249  s->dc_val_base[i] = 1024;
1250  }
1251 
1252  /* which mb is a intra block */
1253  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1254  memset(s->mbintra_table, 1, mb_array_size);
1255 
1256  /* init macroblock skip table */
1257  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1258  // Note the + 1 is for a quicker mpeg4 slice_end detection
1259 
1260  return init_er(s);
1261 fail:
1262  return AVERROR(ENOMEM);
1263 }
1264 
1265 /**
1266  * init common structure for both encoder and decoder.
1267  * this assumes that some variables like width/height are already set
1268  */
/* Common init for encoder and decoder (per the doc comment above); assumes
 * width/height are already set.
 * NOTE(review): the signature (line 1269, presumably
 * "av_cold int ff_MPV_common_init(MpegEncContext *s)" -- confirm) and a few
 * interior lines are missing from this extracted view; code left untouched. */
1270 {
1271  int i;
     /* NOTE(review): line 1273 (the thread-type condition of this ternary)
      * is missing from this view */
1272  int nb_slices = (HAVE_THREADS &&
1274  s->avctx->thread_count : 1;
1275 
1276  if (s->encoding && s->avctx->slices)
1277  nb_slices = s->avctx->slices;
1278 
     /* NOTE(review): the interlaced condition on line 1279 is missing;
      * the first branch rounds mb_height up to an even count */
1280  s->mb_height = (s->height + 31) / 32 * 2;
1281  else
1282  s->mb_height = (s->height + 15) / 16;
1283 
1284  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1286  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1287  return -1;
1288  }
1289 
     /* clamp slice/thread count so each slice gets at least one MB row */
1290  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1291  int max_slices;
1292  if (s->mb_height)
1293  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1294  else
1295  max_slices = MAX_THREADS;
1296  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1297  " reducing to %d\n", nb_slices, max_slices);
1298  nb_slices = max_slices;
1299  }
1300 
1301  if ((s->width || s->height) &&
1302  av_image_check_size(s->width, s->height, 0, s->avctx))
1303  return -1;
1304 
1305  ff_dct_common_init(s);
1306 
1307  s->flags = s->avctx->flags;
1308  s->flags2 = s->avctx->flags2;
1309 
1310  /* set chroma shifts */
1312  &s->chroma_x_shift,
1313  &s->chroma_y_shift);
1314 
1315  /* convert fourcc to upper case */
1317 
1319 
     /* NOTE(review): the FF_ALLOCZ_OR_GOTO opening for s->picture
      * (line 1320) is missing from this view */
1321  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1322  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1323  s->picture[i].f = av_frame_alloc();
1324  if (!s->picture[i].f)
1325  goto fail;
1326  }
1327  memset(&s->next_picture, 0, sizeof(s->next_picture));
1328  memset(&s->last_picture, 0, sizeof(s->last_picture));
1329  memset(&s->current_picture, 0, sizeof(s->current_picture));
1330  memset(&s->new_picture, 0, sizeof(s->new_picture));
1331  s->next_picture.f = av_frame_alloc();
1332  if (!s->next_picture.f)
1333  goto fail;
1334  s->last_picture.f = av_frame_alloc();
1335  if (!s->last_picture.f)
1336  goto fail;
     /* NOTE(review): the av_frame_alloc() assignment for current_picture.f
      * (line 1337) is missing from this view */
1338  if (!s->current_picture.f)
1339  goto fail;
1340  s->new_picture.f = av_frame_alloc();
1341  if (!s->new_picture.f)
1342  goto fail;
1343 
1344  if (init_context_frame(s))
1345  goto fail;
1346 
1347  s->parse_context.state = -1;
1348 
1349  s->context_initialized = 1;
1350  s->thread_context[0] = s;
1351 
1352 // if (s->width && s->height) {
     /* per-slice contexts are shallow copies of the main context; each gets
      * its own MB-row range [start_mb_y, end_mb_y) */
1353  if (nb_slices > 1) {
1354  for (i = 1; i < nb_slices; i++) {
1355  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1356  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1357  }
1358 
1359  for (i = 0; i < nb_slices; i++) {
1360  if (init_duplicate_context(s->thread_context[i]) < 0)
1361  goto fail;
1362  s->thread_context[i]->start_mb_y =
1363  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1364  s->thread_context[i]->end_mb_y =
1365  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1366  }
1367  } else {
1368  if (init_duplicate_context(s) < 0)
1369  goto fail;
1370  s->start_mb_y = 0;
1371  s->end_mb_y = s->mb_height;
1372  }
1373  s->slice_context_count = nb_slices;
1374 // }
1375 
1376  return 0;
1377  fail:
1378  ff_MPV_common_end(s);
1379  return -1;
1380 }
1381 
1382 /**
1383  * Frees and resets MpegEncContext fields depending on the resolution.
1384  * Is used during resolution changes to avoid a full reinitialization of the
1385  * codec.
1386  */
/* Free and reset the resolution-dependent MpegEncContext fields (per the
 * doc comment above); used on resolution changes to avoid full reinit.
 * NOTE(review): the signature (line 1387, presumably
 * "static int free_context_frame(MpegEncContext *s)" -- confirm) and the
 * av_freep() lines 1392-1397 for the MV table bases are missing from this
 * extracted view; code left untouched. */
1388 {
1389  int i, j, k;
1390 
1391  av_freep(&s->mb_type);
     /* working pointers into the (freed) base tables are cleared so no
      * stale pointers survive the teardown */
1398  s->p_mv_table = NULL;
1399  s->b_forw_mv_table = NULL;
1400  s->b_back_mv_table = NULL;
1401  s->b_bidir_forw_mv_table = NULL;
1402  s->b_bidir_back_mv_table = NULL;
1403  s->b_direct_mv_table = NULL;
1404  for (i = 0; i < 2; i++) {
1405  for (j = 0; j < 2; j++) {
1406  for (k = 0; k < 2; k++) {
1407  av_freep(&s->b_field_mv_table_base[i][j][k]);
1408  s->b_field_mv_table[i][j][k] = NULL;
1409  }
1410  av_freep(&s->b_field_select_table[i][j]);
1411  av_freep(&s->p_field_mv_table_base[i][j]);
1412  s->p_field_mv_table[i][j] = NULL;
1413  }
     /* NOTE(review): line 1414 (presumably freeing p_field_select_table[i])
      * is missing from this view */
1415  }
1416 
1417  av_freep(&s->dc_val_base);
     /* NOTE(review): line 1418 (presumably freeing coded_block_base) is
      * missing from this view */
1419  av_freep(&s->mbintra_table);
1420  av_freep(&s->cbp_table);
1421  av_freep(&s->pred_dir_table);
1422 
1423  av_freep(&s->mbskip_table);
1424 
1426  av_freep(&s->er.er_temp_buffer);
1427  av_freep(&s->mb_index2xy);
1428  av_freep(&s->lambda_table);
1429 
1430  av_freep(&s->cplx_tab);
1431  av_freep(&s->bits_tab);
1432 
1433  s->linesize = s->uvlinesize = 0;
1434 
1435  return 0;
1436 }
1437 
/* Reinitialize the resolution-dependent state after a frame size change:
 * tear down per-slice duplicate contexts and frame tables, recompute
 * mb_height, then rebuild both.
 * NOTE(review): the signature (line 1438) is missing from this extracted
 * view -- presumably "int ff_MPV_common_frame_size_change(MpegEncContext *s)";
 * confirm upstream. Code left untouched. */
1439 {
1440  int i, err = 0;
1441 
1442  if (s->slice_context_count > 1) {
1443  for (i = 0; i < s->slice_context_count; i++) {
     /* NOTE(review): the loop body (line 1444, presumably
      * free_duplicate_context(s->thread_context[i])) is missing here */
1445  }
1446  for (i = 1; i < s->slice_context_count; i++) {
1447  av_freep(&s->thread_context[i]);
1448  }
1449  } else
     /* NOTE(review): the else-body (line 1450) is missing from this view */
1451 
1452  if ((err = free_context_frame(s)) < 0)
1453  return err;
1454 
     /* force reallocation of every picture at the new size */
1455  if (s->picture)
1456  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1457  s->picture[i].needs_realloc = 1;
1458  }
1459 
1460  s->last_picture_ptr =
1461  s->next_picture_ptr =
1462  s->current_picture_ptr = NULL;
1463 
1464  // init
     /* NOTE(review): the interlaced condition on line 1465 is missing; the
      * first branch rounds mb_height up to an even count */
1466  s->mb_height = (s->height + 31) / 32 * 2;
1467  else
1468  s->mb_height = (s->height + 15) / 16;
1469 
1470  if ((s->width || s->height) &&
1471  av_image_check_size(s->width, s->height, 0, s->avctx))
1472  return AVERROR_INVALIDDATA;
1473 
1474  if ((err = init_context_frame(s)))
1475  goto fail;
1476 
1477  s->thread_context[0] = s;
1478 
     /* rebuild per-slice contexts, mirroring the logic in common init */
1479  if (s->width && s->height) {
1480  int nb_slices = s->slice_context_count;
1481  if (nb_slices > 1) {
1482  for (i = 1; i < nb_slices; i++) {
1483  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1484  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1485  }
1486 
1487  for (i = 0; i < nb_slices; i++) {
1488  if (init_duplicate_context(s->thread_context[i]) < 0)
1489  goto fail;
1490  s->thread_context[i]->start_mb_y =
1491  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1492  s->thread_context[i]->end_mb_y =
1493  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1494  }
1495  } else {
1496  err = init_duplicate_context(s);
1497  if (err < 0)
1498  goto fail;
1499  s->start_mb_y = 0;
1500  s->end_mb_y = s->mb_height;
1501  }
1502  s->slice_context_count = nb_slices;
1503  }
1504 
1505  return 0;
1506  fail:
1507  ff_MPV_common_end(s);
1508  return err;
1509 }
1510 
1511 /* init common structure for both encoder and decoder */
/* Tear down the common encoder/decoder state: per-slice contexts, parse
 * buffer, the picture array and the frame tables.
 * NOTE(review): the signature (line 1512, presumably
 * "void ff_MPV_common_end(MpegEncContext *s)" -- confirm) and a few interior
 * lines are missing from this extracted view; code left untouched. */
1513 {
1514  int i;
1515 
1516  if (s->slice_context_count > 1) {
1517  for (i = 0; i < s->slice_context_count; i++) {
     /* NOTE(review): the loop body (line 1518, presumably
      * free_duplicate_context(s->thread_context[i])) is missing here */
1519  }
1520  for (i = 1; i < s->slice_context_count; i++) {
1521  av_freep(&s->thread_context[i]);
1522  }
1523  s->slice_context_count = 1;
1524  } else free_duplicate_context(s);
1525 
     /* NOTE(review): line 1526 (presumably freeing parse_context.buffer) is
      * missing from this view */
1527  s->parse_context.buffer_size = 0;
1528 
1531 
1532  if (s->picture) {
1533  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
     /* NOTE(review): line 1534 (presumably ff_free_picture_tables) is
      * missing from this view */
1535  ff_mpeg_unref_picture(s, &s->picture[i]);
1536  av_frame_free(&s->picture[i].f);
1537  }
1538  }
1539  av_freep(&s->picture);
     /* NOTE(review): lines 1540-1551 (teardown of the next/last/current/new
      * picture frames, per the allocations in common init) are missing
      * from this view */
1552 
1553  free_context_frame(s);
1554 
1555  s->context_initialized = 0;
1556  s->last_picture_ptr =
1557  s->next_picture_ptr =
1558  s->current_picture_ptr = NULL;
1559  s->linesize = s->uvlinesize = 0;
1560 }
1561 
/* Build the run/level lookup tables (max_level[], max_run[], index_run[])
 * for an RL table, for both the not-last (last==0) and last (last==1)
 * coefficient ranges. When static_store is non-NULL the three tables for
 * each range are packed into one static buffer; otherwise they are
 * heap-allocated (NOTE(review): the av_malloc results are used unchecked).
 * NOTE(review): the first line of the signature (doxygen line 1562,
 * presumably "av_cold void ff_init_rl(RLTable *rl," -- confirm) is missing
 * from this extracted view; code left untouched. */
1563  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1564 {
1565  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1566  uint8_t index_run[MAX_RUN + 1];
1567  int last, run, level, start, end, i;
1568 
1569  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1570  if (static_store && rl->max_level[0])
1571  return;
1572 
1573  /* compute max_level[], max_run[] and index_run[] */
1574  for (last = 0; last < 2; last++) {
1575  if (last == 0) {
1576  start = 0;
1577  end = rl->last;
1578  } else {
1579  start = rl->last;
1580  end = rl->n;
1581  }
1582 
     /* rl->n doubles as the "unset" sentinel in index_run[] */
1583  memset(max_level, 0, MAX_RUN + 1);
1584  memset(max_run, 0, MAX_LEVEL + 1);
1585  memset(index_run, rl->n, MAX_RUN + 1);
1586  for (i = start; i < end; i++) {
1587  run = rl->table_run[i];
1588  level = rl->table_level[i];
1589  if (index_run[run] == rl->n)
1590  index_run[run] = i;
1591  if (level > max_level[run])
1592  max_level[run] = level;
1593  if (run > max_run[level])
1594  max_run[level] = run;
1595  }
     /* static layout per range: [max_level | max_run | index_run] at
      * offsets 0, MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2 respectively */
1596  if (static_store)
1597  rl->max_level[last] = static_store[last];
1598  else
1599  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1600  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1601  if (static_store)
1602  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1603  else
1604  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1605  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1606  if (static_store)
1607  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1608  else
1609  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1610  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1611  }
1612 }
1613 
/* Precompute the per-qscale RL VLC tables: for each quantizer q (0..31)
 * expand every VLC table entry into (len, level, run), applying the
 * dequantization level = table_level * qmul + qadd up front.
 * NOTE(review): the signature (line 1614, presumably
 * "av_cold void ff_init_vlc_rl(RLTable *rl)" -- confirm) is missing from
 * this extracted view; code left untouched. */
1615 {
1616  int i, q;
1617 
1618  for (q = 0; q < 32; q++) {
1619  int qmul = q * 2;
1620  int qadd = (q - 1) | 1;
1621 
     /* q == 0 means no dequantization (identity) */
1622  if (q == 0) {
1623  qmul = 1;
1624  qadd = 0;
1625  }
1626  for (i = 0; i < rl->vlc.table_size; i++) {
1627  int code = rl->vlc.table[i][0];
1628  int len = rl->vlc.table[i][1];
1629  int level, run;
1630 
     /* run == 66 flags illegal/escape codes; run += 192 marks "last" */
1631  if (len == 0) { // illegal code
1632  run = 66;
1633  level = MAX_LEVEL;
1634  } else if (len < 0) { // more bits needed
1635  run = 0;
1636  level = code;
1637  } else {
1638  if (code == rl->n) { // esc
1639  run = 66;
1640  level = 0;
1641  } else {
1642  run = rl->table_run[code] + 1;
1643  level = rl->table_level[code] * qmul + qadd;
1644  if (code >= rl->last) run += 192;
1645  }
1646  }
1647  rl->rl_vlc[q][i].len = len;
1648  rl->rl_vlc[q][i].level = level;
1649  rl->rl_vlc[q][i].run = run;
1650  }
1651  }
1652 }
1653 
/* Unreference every picture in the pool that is not currently a reference
 * frame.
 * NOTE(review): the signature (line 1654, presumably
 * "static void release_unused_pictures(MpegEncContext *s)" -- confirm) is
 * missing from this extracted view; code left untouched. */
1655 {
1656  int i;
1657 
1658  /* release non reference frames */
1659  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1660  if (!s->picture[i].reference)
1661  ff_mpeg_unref_picture(s, &s->picture[i]);
1662  }
1663 }
1664 
1665 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1666 {
1667  if (pic == s->last_picture_ptr)
1668  return 0;
1669  if (pic->f->buf[0] == NULL)
1670  return 1;
1671  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1672  return 1;
1673  return 0;
1674 }
1675 
/* Find a free slot in the picture pool. In shared mode only a completely
 * empty, non-last slot qualifies; otherwise pic_is_unused() decides.
 * Exhaustion is a hard internal error (see the comment below) and aborts.
 * NOTE(review): the first half of the av_log call (line 1692) is missing
 * from this extracted view; code left untouched. */
1676 static int find_unused_picture(MpegEncContext *s, int shared)
1677 {
1678  int i;
1679 
1680  if (shared) {
1681  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1682  if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1683  return i;
1684  }
1685  } else {
1686  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1687  if (pic_is_unused(s, &s->picture[i]))
1688  return i;
1689  }
1690  }
1691 
1693  "Internal error, picture buffer overflow\n");
1694  /* We could return -1, but the codec would crash trying to draw into a
1695  * non-existing frame anyway. This is safer than waiting for a random crash.
1696  * Also the return of this is never useful, an encoder must only allocate
1697  * as much as allowed in the specification. This has no relationship to how
1698  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1699  * enough for such valid streams).
1700  * Plus, a decoder has to check stream validity and remove frames if too
1701  * many reference frames are around. Waiting for "OOM" is not correct at
1702  * all. Similarly, missing reference frames have to be replaced by
1703  * interpolated/MC frames, anything else is a bug in the codec ...
1704  */
1705  abort();
1706  return -1;
1707 }
1708 
/* Public wrapper around find_unused_picture(): additionally resets a slot
 * that was flagged needs_realloc before handing it out.
 * NOTE(review): the signature (line 1709, presumably
 * "int ff_find_unused_picture(MpegEncContext *s, int shared)" -- confirm)
 * is missing from this extracted view; code left untouched. */
1710 {
1711  int ret = find_unused_picture(s, shared);
1712 
1713  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1714  if (s->picture[ret].needs_realloc) {
1715  s->picture[ret].needs_realloc = 0;
1716  ff_free_picture_tables(&s->picture[ret]);
1717  ff_mpeg_unref_picture(s, &s->picture[ret]);
1718  }
1719  }
1720  return ret;
1721 }
1722 
1723 static void gray_frame(AVFrame *frame)
1724 {
1725  int i, h_chroma_shift, v_chroma_shift;
1726 
1727  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1728 
1729  for(i=0; i<frame->height; i++)
1730  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1731  for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1732  memset(frame->data[1] + frame->linesize[1]*i,
1733  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1734  memset(frame->data[2] + frame->linesize[2]*i,
1735  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1736  }
1737 }
1738 
1739 /**
1740  * generic function called after decoding
1741  * the header and before a frame is decoded.
1742  */
/* Generic per-frame setup, called after the header is decoded and before
 * the frame is decoded (per the doc comment above): releases stale
 * pictures, obtains/allocates the current picture, conjures dummy
 * last/next pictures for streams that start on a non-key or B frame, and
 * selects the dequantizer.
 * NOTE(review): the signature (line 1743, presumably
 * "int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)" --
 * confirm) and numerous interior lines are missing from this extracted
 * view; code left untouched. */
1744 {
1745  int i, ret;
1746  Picture *pic;
1747  s->mb_skipped = 0;
1748 
1749  if (!ff_thread_can_start_frame(avctx)) {
1750  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1751  return -1;
1752  }
1753 
1754  /* mark & release old frames */
     /* NOTE(review): parts of this condition and its body (lines 1756 and
      * 1758) are missing from this view */
1755  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1757  s->last_picture_ptr->f->buf[0]) {
1759  }
1760 
1761  /* release forgotten pictures */
1762  /* if (mpeg124/h263) */
1763  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1764  if (&s->picture[i] != s->last_picture_ptr &&
1765  &s->picture[i] != s->next_picture_ptr &&
1766  s->picture[i].reference && !s->picture[i].needs_realloc) {
1767  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1768  av_log(avctx, AV_LOG_ERROR,
1769  "releasing zombie picture\n");
1770  ff_mpeg_unref_picture(s, &s->picture[i]);
1771  }
1772  }
1773 
     /* NOTE(review): lines 1774 and 1776 are missing from this view */
1775 
1777 
     /* reuse a pre-set, still-empty current picture, else grab a free slot */
1778  if (s->current_picture_ptr &&
1779  s->current_picture_ptr->f->buf[0] == NULL) {
1780  // we already have a unused image
1781  // (maybe it was set before reading the header)
1782  pic = s->current_picture_ptr;
1783  } else {
1784  i = ff_find_unused_picture(s, 0);
1785  if (i < 0) {
1786  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1787  return i;
1788  }
1789  pic = &s->picture[i];
1790  }
1791 
     /* reference == 3 marks both fields as reference */
1792  pic->reference = 0;
1793  if (!s->droppable) {
1794  if (s->pict_type != AV_PICTURE_TYPE_B)
1795  pic->reference = 3;
1796  }
1797 
1799 
1800  if (ff_alloc_picture(s, pic, 0) < 0)
1801  return -1;
1802 
1803  s->current_picture_ptr = pic;
1804  // FIXME use only the vars from current_pic
     /* NOTE(review): several lines in this interlaced/top-field-first setup
      * (1805, 1807, 1809-1814, 1816, 1819) are missing from this view */
1806  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1808  if (s->picture_structure != PICT_FRAME)
1811  }
1815 
1817  // if (s->flags && CODEC_FLAG_QSCALE)
1818  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1820 
1821  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1822  s->current_picture_ptr)) < 0)
1823  return ret;
1824 
     /* NOTE(review): the body of this reference rotation (lines 1826/1828)
      * is missing from this view */
1825  if (s->pict_type != AV_PICTURE_TYPE_B) {
1827  if (!s->droppable)
1829  }
1830  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1832  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1833  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1834  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1835  s->pict_type, s->droppable);
1836 
     /* missing last picture: fabricate a gray dummy so prediction has a
      * valid source (stream starts on non-I frame or field-based keyframe) */
1837  if ((s->last_picture_ptr == NULL ||
1838  s->last_picture_ptr->f->buf[0] == NULL) &&
1839  (s->pict_type != AV_PICTURE_TYPE_I ||
1840  s->picture_structure != PICT_FRAME)) {
1841  int h_chroma_shift, v_chroma_shift;
1843  &h_chroma_shift, &v_chroma_shift);
1845  av_log(avctx, AV_LOG_DEBUG,
1846  "allocating dummy last picture for B frame\n");
1847  else if (s->pict_type != AV_PICTURE_TYPE_I)
1848  av_log(avctx, AV_LOG_ERROR,
1849  "warning: first frame is no keyframe\n");
1850  else if (s->picture_structure != PICT_FRAME)
1851  av_log(avctx, AV_LOG_DEBUG,
1852  "allocate dummy last picture for field based first keyframe\n");
1853 
1854  /* Allocate a dummy frame */
1855  i = ff_find_unused_picture(s, 0);
1856  if (i < 0) {
1857  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1858  return i;
1859  }
1860  s->last_picture_ptr = &s->picture[i];
1861 
1862  s->last_picture_ptr->reference = 3;
1863  s->last_picture_ptr->f->key_frame = 0;
1865 
1866  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1867  s->last_picture_ptr = NULL;
1868  return -1;
1869  }
1870 
     /* fill the dummy with mid-gray (0x80) in all planes */
1871  if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1872  for(i=0; i<avctx->height; i++)
1873  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1874  0x80, avctx->width);
1875  for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1876  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1877  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1878  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1879  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1880  }
1881 
     /* NOTE(review): the condition on line 1882 guarding this luma=16
      * refill is missing from this view */
1883  for(i=0; i<avctx->height; i++)
1884  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1885  }
1886  }
1887 
1888  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1889  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1890  }
     /* missing next picture before a B frame: same dummy treatment */
1891  if ((s->next_picture_ptr == NULL ||
1892  s->next_picture_ptr->f->buf[0] == NULL) &&
1893  s->pict_type == AV_PICTURE_TYPE_B) {
1894  /* Allocate a dummy frame */
1895  i = ff_find_unused_picture(s, 0);
1896  if (i < 0) {
1897  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1898  return i;
1899  }
1900  s->next_picture_ptr = &s->picture[i];
1901 
1902  s->next_picture_ptr->reference = 3;
1903  s->next_picture_ptr->f->key_frame = 0;
1905 
1906  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1907  s->next_picture_ptr = NULL;
1908  return -1;
1909  }
1910  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1911  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1912  }
1913 
1914 #if 0 // BUFREF-FIXME
1915  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1916  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1917 #endif
1918  if (s->last_picture_ptr) {
1920  if (s->last_picture_ptr->f->buf[0] &&
1921  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1922  s->last_picture_ptr)) < 0)
1923  return ret;
1924  }
1925  if (s->next_picture_ptr) {
1927  if (s->next_picture_ptr->f->buf[0] &&
1928  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1929  s->next_picture_ptr)) < 0)
1930  return ret;
1931  }
1932 
     /* NOTE(review): line 1933 (presumably an assertion opening) is
      * missing from this view */
1934  s->last_picture_ptr->f->buf[0]));
1935 
     /* field pictures: offset to the second field and double the strides */
1936  if (s->picture_structure!= PICT_FRAME) {
1937  int i;
1938  for (i = 0; i < 4; i++) {
1940  s->current_picture.f->data[i] +=
1941  s->current_picture.f->linesize[i];
1942  }
1943  s->current_picture.f->linesize[i] *= 2;
1944  s->last_picture.f->linesize[i] *= 2;
1945  s->next_picture.f->linesize[i] *= 2;
1946  }
1947  }
1948 
1949  s->err_recognition = avctx->err_recognition;
1950 
1951  /* set dequantizer, we can't do it during init as
1952  * it might change for mpeg4 and we can't do it in the header
1953  * decode as init is not called for mpeg4 there yet */
     /* NOTE(review): the dequantizer assignments inside these branches
      * (lines 1955-1956, 1958-1959, 1961-1962) are missing from this view */
1954  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1957  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1960  } else {
1963  }
1964 
1965  if (s->avctx->debug & FF_DEBUG_NOMC) {
     /* NOTE(review): the body (line 1966, presumably gray_frame on the
      * current picture) is missing from this view */
1967  }
1968 
1969  return 0;
1970 }
1971 
1972 /* called after a frame has been decoded. */
/* Called after a frame has been decoded: clears the MMX/FP state and, for
 * reference frames, reports decode completion.
 * NOTE(review): the signature (line 1973, presumably
 * "void ff_MPV_frame_end(MpegEncContext *s)" -- confirm) and the body of
 * the if (line 1978, presumably an ff_thread_report_progress call) are
 * missing from this extracted view; code left untouched. */
1974 {
1975  emms_c();
1976 
1977  if (s->current_picture.reference)
1979 }
1980 
1981 
/**
 * Clip the segment (sx,sy)-(ex,ey) against the horizontal range
 * 0 <= x <= maxx, interpolating the y coordinates along the line.
 *
 * All four coordinates are updated in place.
 *
 * @param maxx largest allowed x coordinate
 * @return 1 if the segment lies entirely outside the x range, 0 otherwise
 */
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
{
    /* Order the endpoints by x with a local pointer swap instead of the
     * original tail-recursive self-call; results are identical and the
     * control flow is flat. */
    if (*sx > *ex) {
        int *t;
        t = sx; sx = ex; ex = t;
        t = sy; sy = ey; ey = t;
    }

    if (*sx < 0) {
        if (*ex < 0)
            return 1;
        /* 64-bit intermediate avoids overflow in the interpolation */
        *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
        *sx = 0;
    }

    if (*ex > maxx) {
        if (*sx > maxx)
            return 1;
        *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
        *ex = maxx;
    }
    return 0;
}
2002 
2003 
2004 /**
2005  * Draw a line from (ex, ey) -> (sx, sy).
2006  * @param w width of the image
2007  * @param h height of the image
2008  * @param stride stride/linesize of the image
2009  * @param color color of the arrow
2010  */
/* Draw an anti-aliased line from (sx,sy) to (ex,ey) into an 8-bit plane,
 * using 16.16 fixed-point stepping along the major axis and distributing
 * `color` between the two adjacent pixels on the minor axis. Kept
 * byte-identical: the statement order (clip, then per-axis swap, then the
 * base-pointer offset) is load-bearing. */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
                      int w, int h, int stride, int color)
{
    int x, y, fr, f;

    /* clip against x then (with roles swapped) against y; either call
     * returning nonzero means the segment is fully off-plane */
    if (clip_line(&sx, &sy, &ex, &ey, w - 1))
        return;
    if (clip_line(&sy, &sx, &ey, &ex, h - 1))
        return;

    sx = av_clip(sx, 0, w - 1);
    sy = av_clip(sy, 0, h - 1);
    ex = av_clip(ex, 0, w - 1);
    ey = av_clip(ey, 0, h - 1);

    buf[sy * stride + sx] += color;

    if (FFABS(ex - sx) > FFABS(ey - sy)) {
        /* mostly-horizontal: step in x. The strict > above guarantees
         * ex != sx after the swap, so the division is safe. */
        if (sx > ex) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ex -= sx;
        f = ((ey - sy) << 16) / ex;
        for (x = 0; x <= ex; x++) {
            y  = (x * f) >> 16;
            fr = (x * f) & 0xFFFF;
            /* split color between the two vertically adjacent pixels by
             * the fractional part fr */
            buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[(y + 1) * stride + x] += (color * fr       ) >> 16;
        }
    } else {
        /* mostly-vertical (or single point): step in y; ey may be 0 here,
         * hence the explicit guard before dividing */
        if (sy > ey) {
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf += sx + sy * stride;
        ey -= sy;
        if (ey)
            f = ((ex - sx) << 16) / ey;
        else
            f = 0;
        for(y= 0; y <= ey; y++){
            x  = (y*f) >> 16;
            fr = (y*f) & 0xFFFF;
            buf[y * stride + x    ] += (color * (0x10000 - fr)) >> 16;
            if(fr) buf[y * stride + x + 1] += (color * fr       ) >> 16;
        }
    }
}
2061 
2062 /**
2063  * Draw an arrow from (ex, ey) -> (sx, sy).
2064  * @param w width of the image
2065  * @param h height of the image
2066  * @param stride stride/linesize of the image
2067  * @param color color of the arrow
2068  */
2069 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2070  int ey, int w, int h, int stride, int color, int tail, int direction)
2071 {
2072  int dx,dy;
2073 
2074  if (direction) {
2075  FFSWAP(int, sx, ex);
2076  FFSWAP(int, sy, ey);
2077  }
2078 
2079  sx = av_clip(sx, -100, w + 100);
2080  sy = av_clip(sy, -100, h + 100);
2081  ex = av_clip(ex, -100, w + 100);
2082  ey = av_clip(ey, -100, h + 100);
2083 
2084  dx = ex - sx;
2085  dy = ey - sy;
2086 
2087  if (dx * dx + dy * dy > 3 * 3) {
2088  int rx = dx + dy;
2089  int ry = -dx + dy;
2090  int length = ff_sqrt((rx * rx + ry * ry) << 8);
2091 
2092  // FIXME subpixel accuracy
2093  rx = ROUNDED_DIV(rx * 3 << 4, length);
2094  ry = ROUNDED_DIV(ry * 3 << 4, length);
2095 
2096  if (tail) {
2097  rx = -rx;
2098  ry = -ry;
2099  }
2100 
2101  draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2102  draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2103  }
2104  draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2105 }
2106 
2107 /**
2108  * Print debugging info for the given picture.
2109  */
2110 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2111  uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2112  int *low_delay,
2113  int mb_width, int mb_height, int mb_stride, int quarter_sample)
2114 {
2115  if (avctx->hwaccel || !mbtype_table
2117  return;
2118 
2119 
2120  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2121  int x,y;
2122 
2123  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2125  for (y = 0; y < mb_height; y++) {
2126  for (x = 0; x < mb_width; x++) {
2127  if (avctx->debug & FF_DEBUG_SKIP) {
2128  int count = mbskip_table[x + y * mb_stride];
2129  if (count > 9)
2130  count = 9;
2131  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2132  }
2133  if (avctx->debug & FF_DEBUG_QP) {
2134  av_log(avctx, AV_LOG_DEBUG, "%2d",
2135  qscale_table[x + y * mb_stride]);
2136  }
2137  if (avctx->debug & FF_DEBUG_MB_TYPE) {
2138  int mb_type = mbtype_table[x + y * mb_stride];
2139  // Type & MV direction
2140  if (IS_PCM(mb_type))
2141  av_log(avctx, AV_LOG_DEBUG, "P");
2142  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2143  av_log(avctx, AV_LOG_DEBUG, "A");
2144  else if (IS_INTRA4x4(mb_type))
2145  av_log(avctx, AV_LOG_DEBUG, "i");
2146  else if (IS_INTRA16x16(mb_type))
2147  av_log(avctx, AV_LOG_DEBUG, "I");
2148  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2149  av_log(avctx, AV_LOG_DEBUG, "d");
2150  else if (IS_DIRECT(mb_type))
2151  av_log(avctx, AV_LOG_DEBUG, "D");
2152  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2153  av_log(avctx, AV_LOG_DEBUG, "g");
2154  else if (IS_GMC(mb_type))
2155  av_log(avctx, AV_LOG_DEBUG, "G");
2156  else if (IS_SKIP(mb_type))
2157  av_log(avctx, AV_LOG_DEBUG, "S");
2158  else if (!USES_LIST(mb_type, 1))
2159  av_log(avctx, AV_LOG_DEBUG, ">");
2160  else if (!USES_LIST(mb_type, 0))
2161  av_log(avctx, AV_LOG_DEBUG, "<");
2162  else {
2163  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2164  av_log(avctx, AV_LOG_DEBUG, "X");
2165  }
2166 
2167  // segmentation
2168  if (IS_8X8(mb_type))
2169  av_log(avctx, AV_LOG_DEBUG, "+");
2170  else if (IS_16X8(mb_type))
2171  av_log(avctx, AV_LOG_DEBUG, "-");
2172  else if (IS_8X16(mb_type))
2173  av_log(avctx, AV_LOG_DEBUG, "|");
2174  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2175  av_log(avctx, AV_LOG_DEBUG, " ");
2176  else
2177  av_log(avctx, AV_LOG_DEBUG, "?");
2178 
2179 
2180  if (IS_INTERLACED(mb_type))
2181  av_log(avctx, AV_LOG_DEBUG, "=");
2182  else
2183  av_log(avctx, AV_LOG_DEBUG, " ");
2184  }
2185  }
2186  av_log(avctx, AV_LOG_DEBUG, "\n");
2187  }
2188  }
2189 
2190  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2191  (avctx->debug_mv)) {
2192  const int shift = 1 + quarter_sample;
2193  int mb_y;
2194  uint8_t *ptr;
2195  int i;
2196  int h_chroma_shift, v_chroma_shift, block_height;
2197  const int width = avctx->width;
2198  const int height = avctx->height;
2199  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2200  const int mv_stride = (mb_width << mv_sample_log2) +
2201  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2202 
2203  *low_delay = 0; // needed to see the vectors without trashing the buffers
2204 
2205  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2206 
2207  av_frame_make_writable(pict);
2208 
2209  pict->opaque = NULL;
2210  ptr = pict->data[0];
2211  block_height = 16 >> v_chroma_shift;
2212 
2213  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2214  int mb_x;
2215  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2216  const int mb_index = mb_x + mb_y * mb_stride;
2217  if ((avctx->debug_mv) && motion_val[0]) {
2218  int type;
2219  for (type = 0; type < 3; type++) {
2220  int direction = 0;
2221  switch (type) {
2222  case 0:
2223  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2224  (pict->pict_type!= AV_PICTURE_TYPE_P))
2225  continue;
2226  direction = 0;
2227  break;
2228  case 1:
2229  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2230  (pict->pict_type!= AV_PICTURE_TYPE_B))
2231  continue;
2232  direction = 0;
2233  break;
2234  case 2:
2235  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2236  (pict->pict_type!= AV_PICTURE_TYPE_B))
2237  continue;
2238  direction = 1;
2239  break;
2240  }
2241  if (!USES_LIST(mbtype_table[mb_index], direction))
2242  continue;
2243 
2244  if (IS_8X8(mbtype_table[mb_index])) {
2245  int i;
2246  for (i = 0; i < 4; i++) {
2247  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2248  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2249  int xy = (mb_x * 2 + (i & 1) +
2250  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2251  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2252  int my = (motion_val[direction][xy][1] >> shift) + sy;
2253  draw_arrow(ptr, sx, sy, mx, my, width,
2254  height, pict->linesize[0], 100, 0, direction);
2255  }
2256  } else if (IS_16X8(mbtype_table[mb_index])) {
2257  int i;
2258  for (i = 0; i < 2; i++) {
2259  int sx = mb_x * 16 + 8;
2260  int sy = mb_y * 16 + 4 + 8 * i;
2261  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2262  int mx = (motion_val[direction][xy][0] >> shift);
2263  int my = (motion_val[direction][xy][1] >> shift);
2264 
2265  if (IS_INTERLACED(mbtype_table[mb_index]))
2266  my *= 2;
2267 
2268  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2269  height, pict->linesize[0], 100, 0, direction);
2270  }
2271  } else if (IS_8X16(mbtype_table[mb_index])) {
2272  int i;
2273  for (i = 0; i < 2; i++) {
2274  int sx = mb_x * 16 + 4 + 8 * i;
2275  int sy = mb_y * 16 + 8;
2276  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2277  int mx = motion_val[direction][xy][0] >> shift;
2278  int my = motion_val[direction][xy][1] >> shift;
2279 
2280  if (IS_INTERLACED(mbtype_table[mb_index]))
2281  my *= 2;
2282 
2283  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2284  height, pict->linesize[0], 100, 0, direction);
2285  }
2286  } else {
2287  int sx= mb_x * 16 + 8;
2288  int sy= mb_y * 16 + 8;
2289  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2290  int mx= (motion_val[direction][xy][0]>>shift) + sx;
2291  int my= (motion_val[direction][xy][1]>>shift) + sy;
2292  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2293  }
2294  }
2295  }
2296  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2297  uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2298  0x0101010101010101ULL;
2299  int y;
2300  for (y = 0; y < block_height; y++) {
2301  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2302  (block_height * mb_y + y) *
2303  pict->linesize[1]) = c;
2304  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2305  (block_height * mb_y + y) *
2306  pict->linesize[2]) = c;
2307  }
2308  }
2309  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2310  motion_val[0]) {
2311  int mb_type = mbtype_table[mb_index];
2312  uint64_t u,v;
2313  int y;
2314 #define COLOR(theta, r) \
2315  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2316  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2317 
2318 
2319  u = v = 128;
2320  if (IS_PCM(mb_type)) {
2321  COLOR(120, 48)
2322  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2323  IS_INTRA16x16(mb_type)) {
2324  COLOR(30, 48)
2325  } else if (IS_INTRA4x4(mb_type)) {
2326  COLOR(90, 48)
2327  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2328  // COLOR(120, 48)
2329  } else if (IS_DIRECT(mb_type)) {
2330  COLOR(150, 48)
2331  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2332  COLOR(170, 48)
2333  } else if (IS_GMC(mb_type)) {
2334  COLOR(190, 48)
2335  } else if (IS_SKIP(mb_type)) {
2336  // COLOR(180, 48)
2337  } else if (!USES_LIST(mb_type, 1)) {
2338  COLOR(240, 48)
2339  } else if (!USES_LIST(mb_type, 0)) {
2340  COLOR(0, 48)
2341  } else {
2342  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2343  COLOR(300,48)
2344  }
2345 
2346  u *= 0x0101010101010101ULL;
2347  v *= 0x0101010101010101ULL;
2348  for (y = 0; y < block_height; y++) {
2349  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2350  (block_height * mb_y + y) * pict->linesize[1]) = u;
2351  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2352  (block_height * mb_y + y) * pict->linesize[2]) = v;
2353  }
2354 
2355  // segmentation
2356  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2357  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2358  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2359  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2360  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2361  }
2362  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2363  for (y = 0; y < 16; y++)
2364  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2365  pict->linesize[0]] ^= 0x80;
2366  }
2367  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2368  int dm = 1 << (mv_sample_log2 - 2);
2369  for (i = 0; i < 4; i++) {
2370  int sx = mb_x * 16 + 8 * (i & 1);
2371  int sy = mb_y * 16 + 8 * (i >> 1);
2372  int xy = (mb_x * 2 + (i & 1) +
2373  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2374  // FIXME bidir
2375  int32_t *mv = (int32_t *) &motion_val[0][xy];
2376  if (mv[0] != mv[dm] ||
2377  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2378  for (y = 0; y < 8; y++)
2379  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2380  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2381  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2382  pict->linesize[0]) ^= 0x8080808080808080ULL;
2383  }
2384  }
2385 
2386  if (IS_INTERLACED(mb_type) &&
2387  avctx->codec->id == AV_CODEC_ID_H264) {
2388  // hmm
2389  }
2390  }
2391  mbskip_table[mb_index] = 0;
2392  }
2393  }
2394  }
2395 }
2396 
2398 {
2400  p->qscale_table, p->motion_val, &s->low_delay,
2401  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2402 }
2403 
2405 {
2407  int offset = 2*s->mb_stride + 1;
2408  if(!ref)
2409  return AVERROR(ENOMEM);
2410  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2411  ref->size -= offset;
2412  ref->data += offset;
2413  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2414 }
2415 
2417  uint8_t *dest, uint8_t *src,
2418  int field_based, int field_select,
2419  int src_x, int src_y,
2420  int width, int height, ptrdiff_t stride,
2421  int h_edge_pos, int v_edge_pos,
2422  int w, int h, h264_chroma_mc_func *pix_op,
2423  int motion_x, int motion_y)
2424 {
2425  const int lowres = s->avctx->lowres;
2426  const int op_index = FFMIN(lowres, 3);
2427  const int s_mask = (2 << lowres) - 1;
2428  int emu = 0;
2429  int sx, sy;
2430 
2431  if (s->quarter_sample) {
2432  motion_x /= 2;
2433  motion_y /= 2;
2434  }
2435 
2436  sx = motion_x & s_mask;
2437  sy = motion_y & s_mask;
2438  src_x += motion_x >> lowres + 1;
2439  src_y += motion_y >> lowres + 1;
2440 
2441  src += src_y * stride + src_x;
2442 
2443  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2444  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2446  s->linesize, s->linesize,
2447  w + 1, (h + 1) << field_based,
2448  src_x, src_y << field_based,
2449  h_edge_pos, v_edge_pos);
2450  src = s->edge_emu_buffer;
2451  emu = 1;
2452  }
2453 
2454  sx = (sx << 2) >> lowres;
2455  sy = (sy << 2) >> lowres;
2456  if (field_select)
2457  src += s->linesize;
2458  pix_op[op_index](dest, src, stride, h, sx, sy);
2459  return emu;
2460 }
2461 
2462 /* apply one mpeg motion vector to the three components */
2464  uint8_t *dest_y,
2465  uint8_t *dest_cb,
2466  uint8_t *dest_cr,
2467  int field_based,
2468  int bottom_field,
2469  int field_select,
2470  uint8_t **ref_picture,
2471  h264_chroma_mc_func *pix_op,
2472  int motion_x, int motion_y,
2473  int h, int mb_y)
2474 {
2475  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2476  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2477  ptrdiff_t uvlinesize, linesize;
2478  const int lowres = s->avctx->lowres;
2479  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2480  const int block_s = 8>>lowres;
2481  const int s_mask = (2 << lowres) - 1;
2482  const int h_edge_pos = s->h_edge_pos >> lowres;
2483  const int v_edge_pos = s->v_edge_pos >> lowres;
2484  linesize = s->current_picture.f->linesize[0] << field_based;
2485  uvlinesize = s->current_picture.f->linesize[1] << field_based;
2486 
2487  // FIXME obviously not perfect but qpel will not work in lowres anyway
2488  if (s->quarter_sample) {
2489  motion_x /= 2;
2490  motion_y /= 2;
2491  }
2492 
2493  if(field_based){
2494  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2495  }
2496 
2497  sx = motion_x & s_mask;
2498  sy = motion_y & s_mask;
2499  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2500  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2501 
2502  if (s->out_format == FMT_H263) {
2503  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2504  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2505  uvsrc_x = src_x >> 1;
2506  uvsrc_y = src_y >> 1;
2507  } else if (s->out_format == FMT_H261) {
2508  // even chroma mv's are full pel in H261
2509  mx = motion_x / 4;
2510  my = motion_y / 4;
2511  uvsx = (2 * mx) & s_mask;
2512  uvsy = (2 * my) & s_mask;
2513  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2514  uvsrc_y = mb_y * block_s + (my >> lowres);
2515  } else {
2516  if(s->chroma_y_shift){
2517  mx = motion_x / 2;
2518  my = motion_y / 2;
2519  uvsx = mx & s_mask;
2520  uvsy = my & s_mask;
2521  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2522  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2523  } else {
2524  if(s->chroma_x_shift){
2525  //Chroma422
2526  mx = motion_x / 2;
2527  uvsx = mx & s_mask;
2528  uvsy = motion_y & s_mask;
2529  uvsrc_y = src_y;
2530  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2531  } else {
2532  //Chroma444
2533  uvsx = motion_x & s_mask;
2534  uvsy = motion_y & s_mask;
2535  uvsrc_x = src_x;
2536  uvsrc_y = src_y;
2537  }
2538  }
2539  }
2540 
2541  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2542  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2543  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2544 
2545  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2546  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2547  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2548  linesize >> field_based, linesize >> field_based,
2549  17, 17 + field_based,
2550  src_x, src_y << field_based, h_edge_pos,
2551  v_edge_pos);
2552  ptr_y = s->edge_emu_buffer;
2553  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2554  uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2555  uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2556  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2557  uvlinesize >> field_based, uvlinesize >> field_based,
2558  9, 9 + field_based,
2559  uvsrc_x, uvsrc_y << field_based,
2560  h_edge_pos >> 1, v_edge_pos >> 1);
2561  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2562  uvlinesize >> field_based,uvlinesize >> field_based,
2563  9, 9 + field_based,
2564  uvsrc_x, uvsrc_y << field_based,
2565  h_edge_pos >> 1, v_edge_pos >> 1);
2566  ptr_cb = ubuf;
2567  ptr_cr = vbuf;
2568  }
2569  }
2570 
2571  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
2572  if (bottom_field) {
2573  dest_y += s->linesize;
2574  dest_cb += s->uvlinesize;
2575  dest_cr += s->uvlinesize;
2576  }
2577 
2578  if (field_select) {
2579  ptr_y += s->linesize;
2580  ptr_cb += s->uvlinesize;
2581  ptr_cr += s->uvlinesize;
2582  }
2583 
2584  sx = (sx << 2) >> lowres;
2585  sy = (sy << 2) >> lowres;
2586  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2587 
2588  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2589  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2590  uvsx = (uvsx << 2) >> lowres;
2591  uvsy = (uvsy << 2) >> lowres;
2592  if (hc) {
2593  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2594  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2595  }
2596  }
2597  // FIXME h261 lowres loop filter
2598 }
2599 
2601  uint8_t *dest_cb, uint8_t *dest_cr,
2602  uint8_t **ref_picture,
2603  h264_chroma_mc_func * pix_op,
2604  int mx, int my)
2605 {
2606  const int lowres = s->avctx->lowres;
2607  const int op_index = FFMIN(lowres, 3);
2608  const int block_s = 8 >> lowres;
2609  const int s_mask = (2 << lowres) - 1;
2610  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2611  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2612  int emu = 0, src_x, src_y, sx, sy;
2613  ptrdiff_t offset;
2614  uint8_t *ptr;
2615 
2616  if (s->quarter_sample) {
2617  mx /= 2;
2618  my /= 2;
2619  }
2620 
2621  /* In case of 8X8, we construct a single chroma motion vector
2622  with a special rounding */
2623  mx = ff_h263_round_chroma(mx);
2624  my = ff_h263_round_chroma(my);
2625 
2626  sx = mx & s_mask;
2627  sy = my & s_mask;
2628  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2629  src_y = s->mb_y * block_s + (my >> lowres + 1);
2630 
2631  offset = src_y * s->uvlinesize + src_x;
2632  ptr = ref_picture[1] + offset;
2633  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2634  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2636  s->uvlinesize, s->uvlinesize,
2637  9, 9,
2638  src_x, src_y, h_edge_pos, v_edge_pos);
2639  ptr = s->edge_emu_buffer;
2640  emu = 1;
2641  }
2642  sx = (sx << 2) >> lowres;
2643  sy = (sy << 2) >> lowres;
2644  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2645 
2646  ptr = ref_picture[2] + offset;
2647  if (emu) {
2649  s->uvlinesize, s->uvlinesize,
2650  9, 9,
2651  src_x, src_y, h_edge_pos, v_edge_pos);
2652  ptr = s->edge_emu_buffer;
2653  }
2654  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2655 }
2656 
2657 /**
2658  * motion compensation of a single macroblock
2659  * @param s context
2660  * @param dest_y luma destination pointer
2661  * @param dest_cb chroma cb/u destination pointer
2662  * @param dest_cr chroma cr/v destination pointer
2663  * @param dir direction (0->forward, 1->backward)
2664  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2665  * @param pix_op halfpel motion compensation function (average or put normally)
2666  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2667  */
2668 static inline void MPV_motion_lowres(MpegEncContext *s,
2669  uint8_t *dest_y, uint8_t *dest_cb,
2670  uint8_t *dest_cr,
2671  int dir, uint8_t **ref_picture,
2672  h264_chroma_mc_func *pix_op)
2673 {
2674  int mx, my;
2675  int mb_x, mb_y, i;
2676  const int lowres = s->avctx->lowres;
2677  const int block_s = 8 >>lowres;
2678 
2679  mb_x = s->mb_x;
2680  mb_y = s->mb_y;
2681 
2682  switch (s->mv_type) {
2683  case MV_TYPE_16X16:
2684  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2685  0, 0, 0,
2686  ref_picture, pix_op,
2687  s->mv[dir][0][0], s->mv[dir][0][1],
2688  2 * block_s, mb_y);
2689  break;
2690  case MV_TYPE_8X8:
2691  mx = 0;
2692  my = 0;
2693  for (i = 0; i < 4; i++) {
2694  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2695  s->linesize) * block_s,
2696  ref_picture[0], 0, 0,
2697  (2 * mb_x + (i & 1)) * block_s,
2698  (2 * mb_y + (i >> 1)) * block_s,
2699  s->width, s->height, s->linesize,
2700  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2701  block_s, block_s, pix_op,
2702  s->mv[dir][i][0], s->mv[dir][i][1]);
2703 
2704  mx += s->mv[dir][i][0];
2705  my += s->mv[dir][i][1];
2706  }
2707 
2708  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2709  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2710  pix_op, mx, my);
2711  break;
2712  case MV_TYPE_FIELD:
2713  if (s->picture_structure == PICT_FRAME) {
2714  /* top field */
2715  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2716  1, 0, s->field_select[dir][0],
2717  ref_picture, pix_op,
2718  s->mv[dir][0][0], s->mv[dir][0][1],
2719  block_s, mb_y);
2720  /* bottom field */
2721  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2722  1, 1, s->field_select[dir][1],
2723  ref_picture, pix_op,
2724  s->mv[dir][1][0], s->mv[dir][1][1],
2725  block_s, mb_y);
2726  } else {
2727  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2728  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2729  ref_picture = s->current_picture_ptr->f->data;
2730 
2731  }
2732  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2733  0, 0, s->field_select[dir][0],
2734  ref_picture, pix_op,
2735  s->mv[dir][0][0],
2736  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2737  }
2738  break;
2739  case MV_TYPE_16X8:
2740  for (i = 0; i < 2; i++) {
2741  uint8_t **ref2picture;
2742 
2743  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2744  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2745  ref2picture = ref_picture;
2746  } else {
2747  ref2picture = s->current_picture_ptr->f->data;
2748  }
2749 
2750  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2751  0, 0, s->field_select[dir][i],
2752  ref2picture, pix_op,
2753  s->mv[dir][i][0], s->mv[dir][i][1] +
2754  2 * block_s * i, block_s, mb_y >> 1);
2755 
2756  dest_y += 2 * block_s * s->linesize;
2757  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2758  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2759  }
2760  break;
2761  case MV_TYPE_DMV:
2762  if (s->picture_structure == PICT_FRAME) {
2763  for (i = 0; i < 2; i++) {
2764  int j;
2765  for (j = 0; j < 2; j++) {
2766  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2767  1, j, j ^ i,
2768  ref_picture, pix_op,
2769  s->mv[dir][2 * i + j][0],
2770  s->mv[dir][2 * i + j][1],
2771  block_s, mb_y);
2772  }
2774  }
2775  } else {
2776  for (i = 0; i < 2; i++) {
2777  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2778  0, 0, s->picture_structure != i + 1,
2779  ref_picture, pix_op,
2780  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2781  2 * block_s, mb_y >> 1);
2782 
2783  // after put we make avg of the same block
2785 
2786  // opposite parity is always in the same
2787  // frame if this is second field
2788  if (!s->first_field) {
2789  ref_picture = s->current_picture_ptr->f->data;
2790  }
2791  }
2792  }
2793  break;
2794  default:
2795  av_assert2(0);
2796  }
2797 }
2798 
2799 /**
2800  * find the lowest MB row referenced in the MVs
2801  */
2803 {
2804  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2805  int my, off, i, mvs;
2806 
2807  if (s->picture_structure != PICT_FRAME || s->mcsel)
2808  goto unhandled;
2809 
2810  switch (s->mv_type) {
2811  case MV_TYPE_16X16:
2812  mvs = 1;
2813  break;
2814  case MV_TYPE_16X8:
2815  mvs = 2;
2816  break;
2817  case MV_TYPE_8X8:
2818  mvs = 4;
2819  break;
2820  default:
2821  goto unhandled;
2822  }
2823 
2824  for (i = 0; i < mvs; i++) {
2825  my = s->mv[dir][i][1]<<qpel_shift;
2826  my_max = FFMAX(my_max, my);
2827  my_min = FFMIN(my_min, my);
2828  }
2829 
2830  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2831 
2832  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2833 unhandled:
2834  return s->mb_height-1;
2835 }
2836 
2837 /* put block[] to dest[] */
/**
 * Dequantize intra block i in place using qscale, then inverse-transform it
 * and store ("put", i.e. overwrite) the result into dest with stride line_size.
 */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* undo quantization first; the IDCT below consumes the dequantized coeffs */
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
2844 
2845 /* add block[] to dest[] */
2846 static inline void add_dct(MpegEncContext *s,
2847  int16_t *block, int i, uint8_t *dest, int line_size)
2848 {
2849  if (s->block_last_index[i] >= 0) {
2850  s->idsp.idct_add(dest, line_size, block);
2851  }
2852 }
2853 
2854 static inline void add_dequant_dct(MpegEncContext *s,
2855  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2856 {
2857  if (s->block_last_index[i] >= 0) {
2858  s->dct_unquantize_inter(s, block, i, qscale);
2859 
2860  s->idsp.idct_add(dest, line_size, block);
2861  }
2862 }
2863 
2864 /**
2865  * Clean dc, ac, coded_block for the current non-intra MB.
2866  */
2868 {
2869  int wrap = s->b8_stride;
2870  int xy = s->block_index[0];
2871 
2872  s->dc_val[0][xy ] =
2873  s->dc_val[0][xy + 1 ] =
2874  s->dc_val[0][xy + wrap] =
2875  s->dc_val[0][xy + 1 + wrap] = 1024;
2876  /* ac pred */
2877  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2878  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2879  if (s->msmpeg4_version>=3) {
2880  s->coded_block[xy ] =
2881  s->coded_block[xy + 1 ] =
2882  s->coded_block[xy + wrap] =
2883  s->coded_block[xy + 1 + wrap] = 0;
2884  }
2885  /* chroma */
2886  wrap = s->mb_stride;
2887  xy = s->mb_x + s->mb_y * wrap;
2888  s->dc_val[1][xy] =
2889  s->dc_val[2][xy] = 1024;
2890  /* ac pred */
2891  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2892  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2893 
2894  s->mbintra_table[xy]= 0;
2895 }
2896 
2897 /* generic function called after a macroblock has been parsed by the
2898  decoder or after it has been encoded by the encoder.
2899 
2900  Important variables used:
2901  s->mb_intra : true if intra macroblock
2902  s->mv_dir : motion vector direction
2903  s->mv_type : motion vector type
2904  s->mv : motion vector
2905  s->interlaced_dct : true if interlaced dct used (mpeg2)
2906  */
2907 static av_always_inline
2909  int lowres_flag, int is_mpeg12)
2910 {
2911  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2912 
2913  if (CONFIG_XVMC &&
2914  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2915  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2916  return;
2917  }
2918 
2919  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2920  /* print DCT coefficients */
2921  int i,j;
2922  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2923  for(i=0; i<6; i++){
2924  for(j=0; j<64; j++){
2925  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2926  block[i][s->idsp.idct_permutation[j]]);
2927  }
2928  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2929  }
2930  }
2931 
2932  s->current_picture.qscale_table[mb_xy] = s->qscale;
2933 
2934  /* update DC predictors for P macroblocks */
2935  if (!s->mb_intra) {
2936  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2937  if(s->mbintra_table[mb_xy])
2939  } else {
2940  s->last_dc[0] =
2941  s->last_dc[1] =
2942  s->last_dc[2] = 128 << s->intra_dc_precision;
2943  }
2944  }
2945  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2946  s->mbintra_table[mb_xy]=1;
2947 
2948  if ( (s->flags&CODEC_FLAG_PSNR)
2950  || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2951  uint8_t *dest_y, *dest_cb, *dest_cr;
2952  int dct_linesize, dct_offset;
2953  op_pixels_func (*op_pix)[4];
2954  qpel_mc_func (*op_qpix)[16];
2955  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2956  const int uvlinesize = s->current_picture.f->linesize[1];
2957  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2958  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2959 
2960  /* avoid copy if macroblock skipped in last frame too */
2961  /* skip only during decoding as we might trash the buffers during encoding a bit */
2962  if(!s->encoding){
2963  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2964 
2965  if (s->mb_skipped) {
2966  s->mb_skipped= 0;
2968  *mbskip_ptr = 1;
2969  } else if(!s->current_picture.reference) {
2970  *mbskip_ptr = 1;
2971  } else{
2972  *mbskip_ptr = 0; /* not skipped */
2973  }
2974  }
2975 
2976  dct_linesize = linesize << s->interlaced_dct;
2977  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2978 
2979  if(readable){
2980  dest_y= s->dest[0];
2981  dest_cb= s->dest[1];
2982  dest_cr= s->dest[2];
2983  }else{
2984  dest_y = s->b_scratchpad;
2985  dest_cb= s->b_scratchpad+16*linesize;
2986  dest_cr= s->b_scratchpad+32*linesize;
2987  }
2988 
2989  if (!s->mb_intra) {
2990  /* motion handling */
2991  /* decoding or more than one mb_type (MC was already done otherwise) */
2992  if(!s->encoding){
2993 
2994  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2995  if (s->mv_dir & MV_DIR_FORWARD) {
2998  0);
2999  }
3000  if (s->mv_dir & MV_DIR_BACKWARD) {
3003  0);
3004  }
3005  }
3006 
3007  if(lowres_flag){
3009 
3010  if (s->mv_dir & MV_DIR_FORWARD) {
3011  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3013  }
3014  if (s->mv_dir & MV_DIR_BACKWARD) {
3015  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3016  }
3017  }else{
3018  op_qpix = s->me.qpel_put;
3019  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3020  op_pix = s->hdsp.put_pixels_tab;
3021  }else{
3022  op_pix = s->hdsp.put_no_rnd_pixels_tab;
3023  }
3024  if (s->mv_dir & MV_DIR_FORWARD) {
3025  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3026  op_pix = s->hdsp.avg_pixels_tab;
3027  op_qpix= s->me.qpel_avg;
3028  }
3029  if (s->mv_dir & MV_DIR_BACKWARD) {
3030  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3031  }
3032  }
3033  }
3034 
3035  /* skip dequant / idct if we are really late ;) */
3036  if(s->avctx->skip_idct){
3039  || s->avctx->skip_idct >= AVDISCARD_ALL)
3040  goto skip_idct;
3041  }
3042 
3043  /* add dct residue */
3045  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3046  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3047  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3048  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3049  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3050 
3051  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3052  if (s->chroma_y_shift){
3053  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3054  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3055  }else{
3056  dct_linesize >>= 1;
3057  dct_offset >>=1;
3058  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3059  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3060  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3061  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3062  }
3063  }
3064  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3065  add_dct(s, block[0], 0, dest_y , dct_linesize);
3066  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3067  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3068  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3069 
3070  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3071  if(s->chroma_y_shift){//Chroma420
3072  add_dct(s, block[4], 4, dest_cb, uvlinesize);
3073  add_dct(s, block[5], 5, dest_cr, uvlinesize);
3074  }else{
3075  //chroma422
3076  dct_linesize = uvlinesize << s->interlaced_dct;
3077  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3078 
3079  add_dct(s, block[4], 4, dest_cb, dct_linesize);
3080  add_dct(s, block[5], 5, dest_cr, dct_linesize);
3081  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3082  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3083  if(!s->chroma_x_shift){//Chroma444
3084  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3085  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3086  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3087  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3088  }
3089  }
3090  }//fi gray
3091  }
3092  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3093  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3094  }
3095  } else {
3096  /* dct only in intra block */
3098  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3099  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3100  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3101  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3102 
3103  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3104  if(s->chroma_y_shift){
3105  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3106  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3107  }else{
3108  dct_offset >>=1;
3109  dct_linesize >>=1;
3110  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3111  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3112  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3113  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3114  }
3115  }
3116  }else{
3117  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3118  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3119  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3120  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3121 
3122  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3123  if(s->chroma_y_shift){
3124  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3125  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3126  }else{
3127 
3128  dct_linesize = uvlinesize << s->interlaced_dct;
3129  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3130 
3131  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3132  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3133  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3134  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3135  if(!s->chroma_x_shift){//Chroma444
3136  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3137  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3138  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3139  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
3140  }
3141  }
3142  }//gray
3143  }
3144  }
3145 skip_idct:
3146  if(!readable){
3147  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3148  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3149  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
3150  }
3151  }
3152 }
3153 
3154 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3155 #if !CONFIG_SMALL
3156  if(s->out_format == FMT_MPEG1) {
3157  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3158  else MPV_decode_mb_internal(s, block, 0, 1);
3159  } else
3160 #endif
3161  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3162  else MPV_decode_mb_internal(s, block, 0, 0);
3163 }
3164 
3166 {
3168  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3169  s->first_field, s->low_delay);
3170 }
3171 
3172 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3173  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3174  const int uvlinesize = s->current_picture.f->linesize[1];
3175  const int mb_size= 4 - s->avctx->lowres;
3176 
3177  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3178  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3179  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3180  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3181  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3182  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3183  //block_index is not used by mpeg2, so it is not affected by chroma_format
3184 
3185  s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3186  s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3187  s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3188 
3190  {
3191  if(s->picture_structure==PICT_FRAME){
3192  s->dest[0] += s->mb_y * linesize << mb_size;
3193  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3194  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3195  }else{
3196  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3197  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3198  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3200  }
3201  }
3202 }
3203 
3204 /**
3205  * Permute an 8x8 block.
3206  * @param block the block which will be permuted according to the given permutation vector
3207  * @param permutation the permutation vector
3208  * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3209  * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3210  * (inverse) permutated to scantable order!
3211  */
/**
 * Permute an 8x8 block of coefficients.
 * @param block       coefficients, permuted in place according to permutation
 * @param permutation maps each raster position to its new position
 * @param scantable   scan order; only used to limit work to coded positions
 * @param last        last nonzero coefficient in scantable order
 *
 * Two passes are required: a coefficient is first lifted out and its slot
 * cleared, because the permutation may move another coefficient into it.
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* pass 1: save every coded coefficient and zero its source slot */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* pass 2: write the saved coefficients back at their permuted positions */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
3232 
3234  int i;
3235  MpegEncContext *s = avctx->priv_data;
3236 
3237  if(s==NULL || s->picture==NULL)
3238  return;
3239 
3240  for (i = 0; i < MAX_PICTURE_COUNT; i++)
3241  ff_mpeg_unref_picture(s, &s->picture[i]);
3243 
3247 
3248  s->mb_x= s->mb_y= 0;
3249  s->closed_gop= 0;
3250 
3251  s->parse_context.state= -1;
3253  s->parse_context.overread= 0;
3255  s->parse_context.index= 0;
3256  s->parse_context.last_index= 0;
3257  s->bitstream_buffer_size=0;
3258  s->pp_time=0;
3259 }
3260 
3261 /**
3262  * set qscale and update qscale dependent variables.
3263  */
3264 void ff_set_qscale(MpegEncContext * s, int qscale)
3265 {
3266  if (qscale < 1)
3267  qscale = 1;
3268  else if (qscale > 31)
3269  qscale = 31;
3270 
3271  s->qscale = qscale;
3272  s->chroma_qscale= s->chroma_qscale_table[qscale];
3273 
3274  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3276 }
3277 
3279 {
3282 }