/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "hpeldsp.h"
#include "me_cmp.h"
#include "mpegvideo.h"
#include "h263.h"
#include "internal.h"
#include "mpegutils.h"
#include "svq1.h"
#include "svq1enc.h"
#include "svq1enc_cb.h"
#include "libavutil/avassert.h"


static void svq1_write_header(SVQ1EncContext *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        i = ff_match_2uint16(ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

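/* Sum of squared differences between an int8_t codebook vector and the
 * current int16_t residual block.  This is the default C implementation
 * behind s->ssd_int8_vs_int16; the arch-specific init functions called from
 * svq1_encode_init() may replace it with an optimized version. */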
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               intptr_t size)
{
    int score = 0, i;

    for (i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}

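/* Recursively encode one block: code it as a mean value plus up to six
 * multi-stage codebook vectors and, for level > 0, also try splitting it into
 * two half-size blocks.  Candidate bits go into s->reorder_pb[level], the
 * reconstruction goes into 'decoded', and the rate-distortion score of the
 * chosen encoding is returned. */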
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    int w            = 2 << (level + 2 >> 1);
    int h            = 2 << (level + 1 >> 1);
    int size         = w * h;
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        // level is 5 when encode_block is called from svq1_encode_plane
        // and always < 4 when called recursively from this function.
        codebook_sum   = level < 4 ? svq1_intra_codebook_sum[level] : NULL;
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    } else {
        // level is 5 or < 4, see above for details.
        codebook_sum   = level < 4 ? svq1_inter_codebook_sum[level] : NULL;
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    }

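    /* Baseline: code the block with its mean alone.  Since size == 2^(level + 3),
     * block_sum[0] * block_sum[0] >> (level + 3) is sum^2 / size, so best_score
     * becomes (approximately) the residual energy sum((v - mean)^2), and
     * best_mean is the rounded average of the block. */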
    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

    if (level < 4) {
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum   = -999, best_vector_mean = -999;
            const int stage       = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64 bits slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    av_assert2(mean > -300 && mean < 300);
                    mean               = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            av_assert0(best_vector_mean != -999);
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            best_vector_score   += lambda *
                                   (+1 + 4 * count +
                                    multistage_vlc[1 + count][1]
                                    + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

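    /* If coding the block at this level is still too expensive, try splitting
     * it into two half-size blocks (the split direction alternates with the
     * level's parity) and keep the split only if the combined score, plus the
     * lambda cost of the split flag, beats the unsplit encoding. */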
    split = 0;
    if (best_score > threshold && level) {
        int score  = 0;
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda;

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
        av_assert1(best_mean >= -256 && best_mean < 256);
        av_assert1(best_count >= 0 && best_count < 7);
        av_assert1(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}

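/* Fill MpegEncContext::block_index so that the shared MPEG-style helpers used
 * below (ff_estimate_p_frame_motion(), ff_h263_pred_motion()) address the
 * per-block motion vector arrays of the current macroblock. */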
static void init_block_index(MpegEncContext *s){ //FIXME find a cleaner solution
    s->block_index[0]= s->b8_stride*(s->mb_y*2    )     + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) + 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1)     + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) + 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x;
}

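/* Encode one plane of the frame.  The source is padded to a multiple of 16 in
 * both directions, block motion estimation is run for P-frames, and every
 * 16x16 macroblock is then coded as the cheapest of intra, motion-compensated
 * inter, or skip. */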
static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    uint8_t *src     = s->scratchbuf + stride * 32;
    const int lambda = (s->quality * s->quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

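    /* For P-frames, reuse the MpegEncContext motion-estimation machinery: the
     * fields it needs are filled in by hand below before running
     * ff_estimate_p_frame_motion() over the padded source. */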
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        s->m.avctx                          = s->avctx;
        s->m.current_picture_ptr            = &s->m.current_picture;
        s->m.last_picture_ptr               = &s->m.last_picture;
        s->m.last_picture.f->data[0]        = ref_plane;
        s->m.linesize                       =
        s->m.last_picture.f->linesize[0]    =
        s->m.new_picture.f->linesize[0]     =
        s->m.current_picture.f->linesize[0] = stride;
        s->m.width                          = width;
        s->m.height                         = height;
        s->m.mb_width                       = block_width;
        s->m.mb_height                      = block_height;
        s->m.mb_stride                      = s->m.mb_width + 1;
        s->m.b8_stride                      = 2 * s->m.mb_width + 1;
        s->m.f_code                         = 1;
        s->m.pict_type                      = s->pict_type;
        s->m.motion_est                     = s->motion_est;
        s->m.me.scene_change_score          = 0;
        // s->m.out_format                  = FMT_H263;
        // s->m.unrestricted_mv             = 1;
        s->m.lambda                         = s->quality;
        s->m.qscale                         = s->m.lambda * 139 +
                                              FF_LAMBDA_SCALE * 64 >>
                                              FF_LAMBDA_SHIFT + 7;
        s->m.lambda2                        = s->m.lambda * s->m.lambda +
                                              FF_LAMBDA_SCALE / 2 >>
                                              FF_LAMBDA_SHIFT;

        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
            if (!s->motion_val8[plane] || !s->motion_val16[plane])
                return AVERROR(ENOMEM);
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +
                                             s->m.mb_stride + 1;
        s->m.mecc                          = s->mecc; // move
        ff_init_me(&s->m);

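        /* Motion search over the padded source: each 16-row band of the plane
         * is copied into the scratch buffer with the right edge and the
         * bottom rows replicated, so the estimator always sees complete
         * 16x16 macroblocks. */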
        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y                   = y;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                init_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }

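    /* Main coding loop: for each 16x16 macroblock, candidate encodings
     * (intra, inter with a coded motion vector, skip) are written into the
     * temporary s->reorder_pb contexts and scored; only the winner's bits are
     * later copied into the real bitstream. */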
    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

        s->m.mb_y = y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];
            int count[2][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            init_block_index(&s->m);

            if (s->pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (s->pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    av_assert1(mx     >= -32 && mx     <= 31);
                    av_assert1(my     >= -32 && my     <= 31);
                    av_assert1(pred_x >= -32 && pred_x <= 31);
                    av_assert1(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m.pb, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m.pb, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1]        += lambda * put_bits_count(&s->reorder_pb[5]);

                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16*stride,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16*stride,
                                             decoded, stride, 5, 64, lambda, 0);
                    best      = score[1] <= score[0];

                    vlc       = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]  = s->mecc.sse[0](NULL, src + 16 * x, ref,
                                               stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

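            /* Account the winner and copy its buffered bits into the real
             * bitstream; skip blocks (best == 2) were already written to
             * s->pb directly, and the intra reconstruction still sits in
             * 'temp' until it is copied below. */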
            s->rd_total += score[best];

            if (best != 2)
                for (i = 5; i >= 0; i--)
                    avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                     count[best][i]);
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}

static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
           s->rd_total / (double)(avctx->width * avctx->height *
                                  avctx->frame_number));

    s->m.mb_type = NULL;
    ff_mpv_common_end(&s->m);

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for (i = 0; i < 3; i++) {
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    av_frame_free(&s->current_picture);
    av_frame_free(&s->last_picture);

    return 0;
}

static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int ret;

    if (avctx->width >= 4096 || avctx->height >= 4096) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions too large, maximum is 4095x4095\n");
        return AVERROR(EINVAL);
    }

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);

    s->current_picture = av_frame_alloc();
    s->last_picture    = av_frame_alloc();
    if (!s->current_picture || !s->last_picture) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx   = avctx;
    s->m.avctx = avctx;

    if ((ret = ff_mpv_common_init(&s->m)) < 0) {
        svq1_encode_end(avctx);
        return ret;
    }

    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map            = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map      = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type             = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int16_t));
    s->dummy               = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int32_t));
    s->ssd_int8_vs_int16   = ssd_int8_vs_int16_c;

    if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
        !s->m.me.score_map || !s->mb_type || !s->dummy) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    if (ARCH_PPC)
        ff_svq1enc_init_ppc(s);
    if (ARCH_X86)
        ff_svq1enc_init_x86(s);

    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}

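/* Encode one frame: allocate the output packet, pick the picture type from
 * gop_size and the frame counter, write the SVQ1 frame header, then encode
 * the luma plane and the two 4x-subsampled chroma planes. */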
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
                                MAX_MB_BYTES*3 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
        return ret;

    if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if (!s->current_picture->data[0]) {
        if ((ret = ff_get_buffer(avctx, s->current_picture, 0)) < 0) {
            return ret;
        }
    }
    if (!s->last_picture->data[0]) {
        ret = ff_get_buffer(avctx, s->last_picture, 0);
        if (ret < 0)
            return ret;
    }
    if (!s->scratchbuf) {
        s->scratchbuf = av_malloc_array(s->current_picture->linesize[0], 16 * 3);
        if (!s->scratchbuf)
            return AVERROR(ENOMEM);
    }

    FFSWAP(AVFrame*, s->current_picture, s->last_picture);

    init_put_bits(&s->pb, pkt->data, pkt->size);

    if (avctx->gop_size && (avctx->frame_number % avctx->gop_size))
        s->pict_type = AV_PICTURE_TYPE_P;
    else
        s->pict_type = AV_PICTURE_TYPE_I;
    s->quality = pict->quality;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = s->pict_type;
    avctx->coded_frame->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ff_side_data_set_encoder_stats(pkt, pict->quality, NULL, 0, s->pict_type);

    svq1_write_header(s, s->pict_type);
    for (i = 0; i < 3; i++) {
        int ret = svq1_encode_plane(s, i,
                                    pict->data[i],
                                    s->last_picture->data[i],
                                    s->current_picture->data[i],
                                    s->frame_width  / (i ? 4 : 1),
                                    s->frame_height / (i ? 4 : 1),
                                    pict->linesize[i],
                                    s->current_picture->linesize[i]);
        emms_c();
        if (ret < 0) {
            int j;
            for (j = 0; j < i; j++) {
                av_freep(&s->motion_val8[j]);
                av_freep(&s->motion_val16[j]);
            }
            av_freep(&s->scratchbuf);
            return -1;
        }
    }

    // avpriv_align_put_bits(&s->pb);
    while (put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    pkt->size = put_bits_count(&s->pb) / 8;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

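/* The private "motion-est" option below selects the motion estimation
 * algorithm.  With a standard ffmpeg command-line build it would typically be
 * set per stream, e.g. (hypothetical invocation, for illustration only):
 *     ffmpeg -i input.avi -c:v svq1 -motion-est epzs output.mov
 */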
#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "motion-est", "Motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, "motion-est"},
    { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
    { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
    { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },

    { NULL },
};

static const AVClass svq1enc_class = {
    .class_name = "svq1enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1EncContext),
    .priv_class     = &svq1enc_class,
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};