/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "hpeldsp.h"
#include "me_cmp.h"
#include "mpegvideo.h"
#include "h263.h"
#include "internal.h"
#include "mpegutils.h"
#include "packet_internal.h"
#include "svq1.h"
#include "svq1enc.h"
#include "svq1enc_cb.h"
#include "libavutil/avassert.h"

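/* Write the SVQ1 frame header:
 *   22 bits  frame code (0x20)
 *    8 bits  temporal reference
 *    2 bits  frame type (AV_PICTURE_TYPE_* - 1)
 * For intra frames this is followed by 5 fixed bits and the frame size,
 * either as a 3-bit index into ff_svq1_frame_size_table or, for index 7,
 * as explicit 12-bit width and height. The header ends with 2 zero bits
 * (no checksum, no extra data). */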
static void svq1_write_header(SVQ1EncContext *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        i = ff_match_2uint16(ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

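/* Sum of squared differences between an int8 codebook vector and the int16
 * residual block; the default C version below may be replaced by an
 * arch-optimized implementation through s->ssd_int8_vs_int16. */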
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               intptr_t size)
{
    int score = 0, i;

    for (i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}

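/* Recursively encode one block at the given level (5 = 16x16 down to
 * 0 = 4x2), choosing between a mean plus up-to-six-stage vector
 * quantization of the block and splitting it into two half-size blocks.
 * Returns the rate-distortion score of the chosen encoding. */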
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    int w            = 2 << (level + 2 >> 1);
    int h            = 2 << (level + 1 >> 1);
    int size         = w * h;
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        // level is 5 when encode_block is called from svq1_encode_plane
        // and always < 4 when called recursively from this function.
        codebook_sum   = level < 4 ? svq1_intra_codebook_sum[level] : NULL;
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v                = src[x + y * stride];
                block[0][x + w * y]  = v;
                best_score          += v * v;
                block_sum[0]        += v;
            }
        }
    } else {
        // level is 5 or < 4, see above for details.
        codebook_sum   = level < 4 ? svq1_inter_codebook_sum[level] : NULL;
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v                = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y]  = v;
                best_score          += v * v;
                block_sum[0]        += v;
            }
        }
    }

    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

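    /* For levels below 4, search the 16-entry codebook of each of up to six
     * stages; each stage quantizes the residual left by the previous stage,
     * and the rate cost of adding a stage is weighed in with lambda. */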
    if (level < 4) {
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum   = -999, best_vector_mean = -999;
            const int stage       = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64 bits slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    av_assert2(mean > -300 && mean < 300);
                    mean               = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            av_assert0(best_vector_mean != -999);
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1]    = block_sum[stage] - best_vector_sum;
            best_vector_score      += lambda *
                                      (+1 + 4 * count +
                                       multistage_vlc[1 + count][1]
                                       + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

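    /* If the best whole-block encoding is still above the threshold, try
     * splitting the block into two halves (top/bottom or left/right,
     * depending on the level) and encoding each at the next lower level. */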
    split = 0;
    if (best_score > threshold && level) {
        int score  = 0;
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda;

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
        av_assert1(best_mean >= -256 && best_mean < 256);
        av_assert1(best_count >= 0 && best_count < 7);
        av_assert1(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}

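/* Set up s->block_index[] for the current macroblock: the indices of the
 * four luma 8x8 blocks and the two chroma blocks in the block-based arrays. */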
static void init_block_index(MpegEncContext *s){
    s->block_index[0]= s->b8_stride*(s->mb_y*2    )     + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) + 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1)     + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) + 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x;
}

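/* Encode one plane of the frame: for P-frames, run block motion estimation
 * first, then encode each 16x16 macroblock with the best of intra,
 * motion-compensated inter or skip coding. */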
static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    uint8_t *src     = s->scratchbuf + stride * 32;
    const int lambda = (s->quality * s->quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

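    /* For P-frames, borrow the MpegEncContext motion estimation: fill in the
     * fields ff_estimate_p_frame_motion() needs and run it over a padded
     * copy of the source plane, storing per-MB and per-8x8 motion vectors. */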
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        s->m.avctx                         = s->avctx;
        s->m.current_picture_ptr           = &s->m.current_picture;
        s->m.last_picture_ptr              = &s->m.last_picture;
        s->m.last_picture.f->data[0]       = ref_plane;
        s->m.linesize                      =
        s->m.last_picture.f->linesize[0]   =
        s->m.new_picture.f->linesize[0]    =
        s->m.current_picture.f->linesize[0] = stride;
        s->m.width                         = width;
        s->m.height                        = height;
        s->m.mb_width                      = block_width;
        s->m.mb_height                     = block_height;
        s->m.mb_stride                     = s->m.mb_width + 1;
        s->m.b8_stride                     = 2 * s->m.mb_width + 1;
        s->m.f_code                        = 1;
        s->m.pict_type                     = s->pict_type;
        s->m.motion_est                    = s->motion_est;
        s->m.me.scene_change_score         = 0;
        // s->m.out_format                    = FMT_H263;
        // s->m.unrestricted_mv               = 1;
        s->m.lambda                        = s->quality;
        s->m.qscale                        = s->m.lambda * 139 +
                                             FF_LAMBDA_SCALE * 64 >>
                                             FF_LAMBDA_SHIFT + 7;
        s->m.lambda2                       = s->m.lambda * s->m.lambda +
                                             FF_LAMBDA_SCALE / 2 >>
                                             FF_LAMBDA_SHIFT;

        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
            if (!s->motion_val8[plane] || !s->motion_val16[plane])
                return AVERROR(ENOMEM);
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +
                                             s->m.mb_stride + 1;
        s->m.mecc                          = s->mecc; // move
        ff_init_me(&s->m);

        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y                   = y;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                init_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        ff_fix_long_p_mvs(&s->m, CANDIDATE_MB_TYPE_INTRA);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

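        /* For each macroblock, build up to three candidate encodings into
         * reorder buffers -- intra, motion-compensated inter and (for a zero
         * MV) skip -- and keep the one with the lowest rate-distortion score. */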
        s->m.mb_y = y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];
            int count[2][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            init_block_index(&s->m);

            if (s->pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (s->pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    av_assert1(mx     >= -32 && mx     <= 31);
                    av_assert1(my     >= -32 && my     <= 31);
                    av_assert1(pred_x >= -32 && pred_x <= 31);
                    av_assert1(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m.pb, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m.pb, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1]        += lambda * put_bits_count(&s->reorder_pb[5]);

                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16*stride,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16*stride,
                                             decoded, stride, 5, 64, lambda, 0);
                    best      = score[1] <= score[0];

                    vlc       = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]  = s->mecc.sse[0](NULL, src + 16 * x, ref,
                                               stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

            if (best != 2)
                for (i = 5; i >= 0; i--)
                    avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                     count[best][i]);
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}

static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
           s->rd_total / (double)(avctx->width * avctx->height *
                                  avctx->frame_number));

    s->m.mb_type = NULL;
    ff_mpv_common_end(&s->m);

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for (i = 0; i < 3; i++) {
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    av_frame_free(&s->current_picture);
    av_frame_free(&s->last_picture);

    return 0;
}

static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int ret;

    if (avctx->width >= 4096 || avctx->height >= 4096) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions too large, maximum is 4095x4095\n");
        return AVERROR(EINVAL);
    }

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);

    s->current_picture = av_frame_alloc();
    s->last_picture    = av_frame_alloc();
    if (!s->current_picture || !s->last_picture) {
        return AVERROR(ENOMEM);
    }

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx   = avctx;
    s->m.avctx = avctx;

    if ((ret = ff_mpv_common_init(&s->m)) < 0) {
        return ret;
    }

    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map            = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map      = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type             = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int16_t));
    s->dummy               = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int32_t));
    s->ssd_int8_vs_int16   = ssd_int8_vs_int16_c;

    if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
        !s->m.me.score_map || !s->mb_type || !s->dummy) {
        return AVERROR(ENOMEM);
    }

    if (ARCH_PPC)
        ff_svq1enc_init_ppc(s);
    if (ARCH_X86)
        ff_svq1enc_init_x86(s);

    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}
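
/* Encode one frame: allocate output and reference buffers on first use,
 * pick I or P from the position in the GOP, write the SVQ1 header and then
 * encode the three planes (luma plus two 4:1:0 chroma planes). */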
577 
579  const AVFrame *pict, int *got_packet)
580 {
581  SVQ1EncContext *const s = avctx->priv_data;
582  int i, ret;
583 
584  if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
586  return ret;
587 
588  if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
589  av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
590  return -1;
591  }
592 
593  if (!s->current_picture->data[0]) {
594  if ((ret = ff_get_buffer(avctx, s->current_picture, 0)) < 0) {
595  return ret;
596  }
597  }
598  if (!s->last_picture->data[0]) {
599  ret = ff_get_buffer(avctx, s->last_picture, 0);
600  if (ret < 0)
601  return ret;
602  }
603  if (!s->scratchbuf) {
604  s->scratchbuf = av_malloc_array(s->current_picture->linesize[0], 16 * 3);
605  if (!s->scratchbuf)
606  return AVERROR(ENOMEM);
607  }
608 
610 
611  init_put_bits(&s->pb, pkt->data, pkt->size);
612 
613  if (avctx->gop_size && (avctx->frame_number % avctx->gop_size))
615  else
617  s->quality = pict->quality;
618 
619 #if FF_API_CODED_FRAME
621  avctx->coded_frame->pict_type = s->pict_type;
624 #endif
625 
627 
629  for (i = 0; i < 3; i++) {
630  int ret = svq1_encode_plane(s, i,
631  pict->data[i],
632  s->last_picture->data[i],
633  s->current_picture->data[i],
634  s->frame_width / (i ? 4 : 1),
635  s->frame_height / (i ? 4 : 1),
636  pict->linesize[i],
637  s->current_picture->linesize[i]);
638  emms_c();
639  if (ret < 0) {
640  int j;
641  for (j = 0; j < i; j++) {
642  av_freep(&s->motion_val8[j]);
643  av_freep(&s->motion_val16[j]);
644  }
645  av_freep(&s->scratchbuf);
646  return -1;
647  }
648  }
649 
650  // avpriv_align_put_bits(&s->pb);
651  while (put_bits_count(&s->pb) & 31)
652  put_bits(&s->pb, 1, 0);
653 
654  flush_put_bits(&s->pb);
655 
656  pkt->size = put_bits_count(&s->pb) / 8;
657  if (s->pict_type == AV_PICTURE_TYPE_I)
658  pkt->flags |= AV_PKT_FLAG_KEY;
659  *got_packet = 1;
660 
661  return 0;
662 }

#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "motion-est", "Motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, "motion-est"},
        { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
        { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
        { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },

    { NULL },
};

static const AVClass svq1enc_class = {
    .class_name = "svq1enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1EncContext),
    .priv_class     = &svq1enc_class,
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};