FFmpeg
roqvideoenc.c
1 /*
2  * RoQ Video Encoder.
3  *
4  * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
5  * Copyright (C) 2004-2007 Eric Lasota
6  * Based on RoQ specs (C) 2001 Tim Ferguson
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * id RoQ encoder by Vitor. Based on the Switchblade3 library and the
28  * Switchblade3 FFmpeg glue by Eric Lasota.
29  */
30 
31 /*
32  * COSTS:
33  * Level 1:
34  * SKIP - 2 bits
35  * MOTION - 2 + 8 bits
36  * CODEBOOK - 2 + 8 bits
37  * SUBDIVIDE - 2 + combined subcel cost
38  *
39  * Level 2:
40  * SKIP - 2 bits
41  * MOTION - 2 + 8 bits
42  * CODEBOOK - 2 + 8 bits
43  * SUBDIVIDE - 2 + 4*8 bits
44  *
45  * Maximum cost: 138 bits per cel
46  *
47  * Proper evaluation requires LCD fraction comparison, which requires
48  * Squared Error (SE) loss * savings increase
49  *
50  * Maximum savings increase: 136 bits
51  * Maximum SE loss without overflow: 31580641
52  * Components in 8x8 supercel: 192
53  * Maximum SE precision per component: 164482
54  * >65025, so no truncation is needed (phew)
55  */
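/*
 * A sketch of the arithmetic behind the figures above, assuming the
 * comparison products are kept in 32-bit unsigned arithmetic:
 *
 *   Maximum cost per cel:       2 + 4*(2 + 4*8)  = 138 bits
 *   Maximum savings increase:   138 - 2 (SKIP)   = 136 bits
 *   Max SE without overflow:    (2^32 - 1) / 136 = 31580641
 *   Components in 8x8 supercel: 8*8*3            = 192
 *   Max SE per component:       31580641 / 192   = 164482
 *   Worst per-component SE:     255^2            = 65025  (< 164482)
 */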
56 
57 #include <string.h>
58 
59 #include "libavutil/attributes.h"
60 #include "libavutil/opt.h"
61 #include "roqvideo.h"
62 #include "bytestream.h"
63 #include "elbg.h"
64 #include "internal.h"
65 #include "mathops.h"
66 
67 #define CHROMA_BIAS 1
68 
69 /**
70  * Maximum number of generated 4x4 codebooks. Kept below 256 in Quake 3
71  * compatible mode to work around a Quake 3 decoder bug.
72  */
73 #define MAX_CBS_4x4 256
74 
75 #define MAX_CBS_2x2 256 ///< Maximum number of 2x2 codebooks.
76 
77 /* The cast is useful when multiplying it by INT_MAX */
78 #define ROQ_LAMBDA_SCALE ((uint64_t) FF_LAMBDA_SCALE)
79 
80 /* Macroblock support functions */
81 static void unpack_roq_cell(roq_cell *cell, uint8_t u[4*3])
82 {
83  memcpy(u , cell->y, 4);
84  memset(u+4, cell->u, 4);
85  memset(u+8, cell->v, 4);
86 }
87 
88 static void unpack_roq_qcell(uint8_t cb2[], roq_qcell *qcell, uint8_t u[4*4*3])
89 {
90  int i,cp;
91  static const int offsets[4] = {0, 2, 8, 10};
92 
93  for (cp=0; cp<3; cp++)
94  for (i=0; i<4; i++) {
95  u[4*4*cp + offsets[i] ] = cb2[qcell->idx[i]*2*2*3 + 4*cp ];
96  u[4*4*cp + offsets[i]+1] = cb2[qcell->idx[i]*2*2*3 + 4*cp+1];
97  u[4*4*cp + offsets[i]+4] = cb2[qcell->idx[i]*2*2*3 + 4*cp+2];
98  u[4*4*cp + offsets[i]+5] = cb2[qcell->idx[i]*2*2*3 + 4*cp+3];
99  }
100 }
101 
102 
103 static void enlarge_roq_mb4(uint8_t base[3*16], uint8_t u[3*64])
104 {
105  int x,y,cp;
106 
107  for(cp=0; cp<3; cp++)
108  for(y=0; y<8; y++)
109  for(x=0; x<8; x++)
110  *u++ = base[(y/2)*4 + (x/2) + 16*cp];
111 }
112 
113 static inline int square(int x)
114 {
115  return x*x;
116 }
117 
118 static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
119 {
120  int diff=0;
121 
122  while(count--)
123  diff += square(*b++ - *a++);
124 
125  return diff;
126 }
127 
128 // FIXME Could use DSPContext.sse, but it is not so speed critical (used
129 // just for motion estimation).
130 static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
131  int x2, int y2, const int *stride1, const int *stride2, int size)
132 {
133  int i, k;
134  int sse=0;
135 
136  for (k=0; k<3; k++) {
137  int bias = (k ? CHROMA_BIAS : 4);
138  for (i=0; i<size; i++)
139  sse += bias*eval_sse(buf1[k] + (y1+i)*stride1[k] + x1,
140  buf2[k] + (y2+i)*stride2[k] + x2, size);
141  }
142 
143  return sse;
144 }
145 
146 static int eval_motion_dist(RoqContext *enc, int x, int y, motion_vect vect,
147  int size)
148 {
149  int mx=vect.d[0];
150  int my=vect.d[1];
151 
152  if (mx < -7 || mx > 7)
153  return INT_MAX;
154 
155  if (my < -7 || my > 7)
156  return INT_MAX;
157 
158  mx += x;
159  my += y;
160 
161  if ((unsigned) mx > enc->width-size || (unsigned) my > enc->height-size)
162  return INT_MAX;
163 
164  return block_sse(enc->frame_to_enc->data, enc->last_frame->data, x, y,
165  mx, my,
166  enc->frame_to_enc->linesize, enc->last_frame->linesize,
167  size);
168 }
169 
170 /**
171  * @return distortion between two macroblocks
172  */
173 static inline int squared_diff_macroblock(uint8_t a[], uint8_t b[], int size)
174 {
175  int cp, sdiff=0;
176 
177  for(cp=0;cp<3;cp++) {
178  int bias = (cp ? CHROMA_BIAS : 4);
179  sdiff += bias*eval_sse(a, b, size*size);
180  a += size*size;
181  b += size*size;
182  }
183 
184  return sdiff;
185 }
186 
187 typedef struct SubcelEvaluation {
188  int eval_dist[4];
189  int best_bit_use;
190  int best_coding;
191 
192  int subCels[4];
193  motion_vect motion;
194  int cbEntry;
195 } SubcelEvaluation;
196 
197 typedef struct CelEvaluation {
198  int eval_dist[4];
199  int best_coding;
200 
201  SubcelEvaluation subCels[4];
202 
203  motion_vect motion;
204  int cbEntry;
205 
206  int sourceX, sourceY;
207 } CelEvaluation;
208 
209 typedef struct RoqCodebooks {
210  int numCB4;
211  int numCB2;
212  int usedCB2[MAX_CBS_2x2];
213  int usedCB4[MAX_CBS_4x4];
214  uint8_t unpacked_cb2[MAX_CBS_2x2*2*2*3];
215  uint8_t unpacked_cb4[MAX_CBS_4x4*4*4*3];
216  uint8_t unpacked_cb4_enlarged[MAX_CBS_4x4*8*8*3];
217 } RoqCodebooks;
218 
219 /**
220  * Temporary vars
221  */
222 typedef struct RoqTempData
223 {
224  CelEvaluation *cel_evals;
225 
226  int f2i4[MAX_CBS_4x4];
227  int i2f4[MAX_CBS_4x4];
228  int f2i2[MAX_CBS_2x2];
229  int i2f2[MAX_CBS_2x2];
230 
231  int mainChunkSize;
232 
233  int numCB4;
234  int numCB2;
235 
236  RoqCodebooks codebooks;
237 
238  int *closest_cb2;
239  int used_option[4];
240 } RoqTempdata;
241 
242 /**
243  * Initialize cel evaluators and set their source coordinates
244  */
245 static int create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
246 {
247  int n=0, x, y, i;
248 
249  tempData->cel_evals = av_malloc_array(enc->width*enc->height/64, sizeof(CelEvaluation));
250  if (!tempData->cel_evals)
251  return AVERROR(ENOMEM);
252 
253  /* Map to the ROQ quadtree order */
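 /* i = 0..3 selects the four 8x8 cels of each 16x16 block at
    offsets (0,0), (8,0), (0,8), (8,8). */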
254  for (y=0; y<enc->height; y+=16)
255  for (x=0; x<enc->width; x+=16)
256  for(i=0; i<4; i++) {
257  tempData->cel_evals[n ].sourceX = x + (i&1)*8;
258  tempData->cel_evals[n++].sourceY = y + (i&2)*4;
259  }
260 
261  return 0;
262 }
263 
264 /**
265  * Get macroblocks from parts of the image
266  */
267 static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
268 {
269  int i, j, cp;
270 
271  for (cp=0; cp<3; cp++) {
272  int stride = frame->linesize[cp];
273  for (i=0; i<dim; i++)
274  for (j=0; j<dim; j++)
275  *mb++ = frame->data[cp][(y+i)*stride + x + j];
276  }
277 }
278 
279 /**
280  * Find the codebook with the lowest distortion from an image
281  */
282 static int index_mb(uint8_t cluster[], uint8_t cb[], int numCB,
283  int *outIndex, int dim)
284 {
285  int i, lDiff = INT_MAX, pick=0;
286 
287  /* Diff against the others */
288  for (i=0; i<numCB; i++) {
289  int diff = squared_diff_macroblock(cluster, cb + i*dim*dim*3, dim);
290  if (diff < lDiff) {
291  lDiff = diff;
292  pick = i;
293  }
294  }
295 
296  *outIndex = pick;
297  return lDiff;
298 }
299 
300 #define EVAL_MOTION(MOTION) \
301  do { \
302  diff = eval_motion_dist(enc, j, i, MOTION, blocksize); \
303  \
304  if (diff < lowestdiff) { \
305  lowestdiff = diff; \
306  bestpick = MOTION; \
307  } \
308  } while(0)
309 
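/*
 * Search strategy of motion_search() below: for each block the zero vector,
 * the co-located 8x8 vector (when refining 4x4 blocks), the previous frame's
 * vectors at and next to the co-located position, the already-computed left,
 * above and above-right vectors of this frame and their component-wise median
 * are evaluated as candidates; the best one is then refined greedily over the
 * eight one-pel offsets until no neighbour improves on the current best.
 */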
310 static void motion_search(RoqContext *enc, int blocksize)
311 {
312  static const motion_vect offsets[8] = {
313  {{ 0,-1}},
314  {{ 0, 1}},
315  {{-1, 0}},
316  {{ 1, 0}},
317  {{-1, 1}},
318  {{ 1,-1}},
319  {{-1,-1}},
320  {{ 1, 1}},
321  };
322 
323  int diff, lowestdiff, oldbest;
324  int off[3];
325  motion_vect bestpick = {{0,0}};
326  int i, j, k, offset;
327 
328  motion_vect *last_motion;
329  motion_vect *this_motion;
330  motion_vect vect, vect2;
331 
332  int max=(enc->width/blocksize)*enc->height/blocksize;
333 
334  if (blocksize == 4) {
335  last_motion = enc->last_motion4;
336  this_motion = enc->this_motion4;
337  } else {
338  last_motion = enc->last_motion8;
339  this_motion = enc->this_motion8;
340  }
341 
342  for (i=0; i<enc->height; i+=blocksize)
343  for (j=0; j<enc->width; j+=blocksize) {
344  lowestdiff = eval_motion_dist(enc, j, i, (motion_vect) {{0,0}},
345  blocksize);
346  bestpick.d[0] = 0;
347  bestpick.d[1] = 0;
348 
349  if (blocksize == 4)
350  EVAL_MOTION(enc->this_motion8[(i/8)*(enc->width/8) + j/8]);
351 
352  offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
353  if (offset < max && offset >= 0)
354  EVAL_MOTION(last_motion[offset]);
355 
356  offset++;
357  if (offset < max && offset >= 0)
358  EVAL_MOTION(last_motion[offset]);
359 
360  offset = (i/blocksize + 1)*enc->width/blocksize + j/blocksize;
361  if (offset < max && offset >= 0)
362  EVAL_MOTION(last_motion[offset]);
363 
364  off[0]= (i/blocksize)*enc->width/blocksize + j/blocksize - 1;
365  off[1]= off[0] - enc->width/blocksize + 1;
366  off[2]= off[1] + 1;
367 
368  if (i) {
369 
370  for(k=0; k<2; k++)
371  vect.d[k]= mid_pred(this_motion[off[0]].d[k],
372  this_motion[off[1]].d[k],
373  this_motion[off[2]].d[k]);
374 
375  EVAL_MOTION(vect);
376  for(k=0; k<3; k++)
377  EVAL_MOTION(this_motion[off[k]]);
378  } else if(j)
379  EVAL_MOTION(this_motion[off[0]]);
380 
381  vect = bestpick;
382 
383  oldbest = -1;
384  while (oldbest != lowestdiff) {
385  oldbest = lowestdiff;
386  for (k=0; k<8; k++) {
387  vect2 = vect;
388  vect2.d[0] += offsets[k].d[0];
389  vect2.d[1] += offsets[k].d[1];
390  EVAL_MOTION(vect2);
391  }
392  vect = bestpick;
393  }
394  offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
395  this_motion[offset] = bestpick;
396  }
397 }
398 
399 /**
400  * Get distortion for all options available to a subcel
401  */
402 static void gather_data_for_subcel(SubcelEvaluation *subcel, int x,
403  int y, RoqContext *enc, RoqTempdata *tempData)
404 {
405  uint8_t mb4[4*4*3];
406  uint8_t mb2[2*2*3];
407  int cluster_index;
408  int i, best_dist;
409 
410  static const int bitsUsed[4] = {2, 10, 10, 34};
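 /* Bit costs of MOT, FCC, SLD and CCC from the COSTS table above; the loop
    at the end keeps the mode minimizing
    ROQ_LAMBDA_SCALE*distortion + lambda*bits. */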
411 
412  if (enc->framesSinceKeyframe >= 1) {
413  subcel->motion = enc->this_motion4[y*enc->width/16 + x/4];
414 
415  subcel->eval_dist[RoQ_ID_FCC] =
416  eval_motion_dist(enc, x, y,
417  enc->this_motion4[y*enc->width/16 + x/4], 4);
418  } else
419  subcel->eval_dist[RoQ_ID_FCC] = INT_MAX;
420 
421  if (enc->framesSinceKeyframe >= 2)
422  subcel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
423  enc->current_frame->data, x,
424  y, x, y,
425  enc->frame_to_enc->linesize,
426  enc->current_frame->linesize,
427  4);
428  else
429  subcel->eval_dist[RoQ_ID_MOT] = INT_MAX;
430 
431  cluster_index = y*enc->width/16 + x/4;
432 
433  get_frame_mb(enc->frame_to_enc, x, y, mb4, 4);
434 
435  subcel->eval_dist[RoQ_ID_SLD] = index_mb(mb4,
436  tempData->codebooks.unpacked_cb4,
437  tempData->codebooks.numCB4,
438  &subcel->cbEntry, 4);
439 
440  subcel->eval_dist[RoQ_ID_CCC] = 0;
441 
442  for(i=0;i<4;i++) {
443  subcel->subCels[i] = tempData->closest_cb2[cluster_index*4+i];
444 
445  get_frame_mb(enc->frame_to_enc, x+2*(i&1),
446  y+(i&2), mb2, 2);
447 
448  subcel->eval_dist[RoQ_ID_CCC] +=
449  squared_diff_macroblock(tempData->codebooks.unpacked_cb2 + subcel->subCels[i]*2*2*3, mb2, 2);
450  }
451 
452  best_dist = INT_MAX;
453  for (i=0; i<4; i++)
454  if (ROQ_LAMBDA_SCALE*subcel->eval_dist[i] + enc->lambda*bitsUsed[i] <
455  best_dist) {
456  subcel->best_coding = i;
457  subcel->best_bit_use = bitsUsed[i];
458  best_dist = ROQ_LAMBDA_SCALE*subcel->eval_dist[i] +
459  enc->lambda*bitsUsed[i];
460  }
461 }
462 
463 /**
464  * Get distortion for all options available to a cel
465  */
466 static void gather_data_for_cel(CelEvaluation *cel, RoqContext *enc,
467  RoqTempdata *tempData)
468 {
469  uint8_t mb8[8*8*3];
470  int index = cel->sourceY*enc->width/64 + cel->sourceX/8;
471  int i, j, best_dist, divide_bit_use;
472 
473  int bitsUsed[4] = {2, 10, 10, 0};
474 
475  if (enc->framesSinceKeyframe >= 1) {
476  cel->motion = enc->this_motion8[index];
477 
478  cel->eval_dist[RoQ_ID_FCC] =
479  eval_motion_dist(enc, cel->sourceX, cel->sourceY,
480  enc->this_motion8[index], 8);
481  } else
482  cel->eval_dist[RoQ_ID_FCC] = INT_MAX;
483 
484  if (enc->framesSinceKeyframe >= 2)
485  cel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
486  enc->current_frame->data,
487  cel->sourceX, cel->sourceY,
488  cel->sourceX, cel->sourceY,
489  enc->frame_to_enc->linesize,
490  enc->current_frame->linesize,8);
491  else
492  cel->eval_dist[RoQ_ID_MOT] = INT_MAX;
493 
494  get_frame_mb(enc->frame_to_enc, cel->sourceX, cel->sourceY, mb8, 8);
495 
496  cel->eval_dist[RoQ_ID_SLD] =
497  index_mb(mb8, tempData->codebooks.unpacked_cb4_enlarged,
498  tempData->codebooks.numCB4, &cel->cbEntry, 8);
499 
500  gather_data_for_subcel(cel->subCels + 0, cel->sourceX+0, cel->sourceY+0, enc, tempData);
501  gather_data_for_subcel(cel->subCels + 1, cel->sourceX+4, cel->sourceY+0, enc, tempData);
502  gather_data_for_subcel(cel->subCels + 2, cel->sourceX+0, cel->sourceY+4, enc, tempData);
503  gather_data_for_subcel(cel->subCels + 3, cel->sourceX+4, cel->sourceY+4, enc, tempData);
504 
505  cel->eval_dist[RoQ_ID_CCC] = 0;
506  divide_bit_use = 0;
507  for (i=0; i<4; i++) {
508  cel->eval_dist[RoQ_ID_CCC] +=
509  cel->subCels[i].eval_dist[cel->subCels[i].best_coding];
510  divide_bit_use += cel->subCels[i].best_bit_use;
511  }
512 
513  best_dist = INT_MAX;
514  bitsUsed[3] = 2 + divide_bit_use;
515 
516  for (i=0; i<4; i++)
517  if (ROQ_LAMBDA_SCALE*cel->eval_dist[i] + enc->lambda*bitsUsed[i] <
518  best_dist) {
519  cel->best_coding = i;
520  best_dist = ROQ_LAMBDA_SCALE*cel->eval_dist[i] +
521  enc->lambda*bitsUsed[i];
522  }
523 
524  tempData->used_option[cel->best_coding]++;
525  tempData->mainChunkSize += bitsUsed[cel->best_coding];
526 
527  if (cel->best_coding == RoQ_ID_SLD)
528  tempData->codebooks.usedCB4[cel->cbEntry]++;
529 
530  if (cel->best_coding == RoQ_ID_CCC)
531  for (i=0; i<4; i++) {
532  if (cel->subCels[i].best_coding == RoQ_ID_SLD)
533  tempData->codebooks.usedCB4[cel->subCels[i].cbEntry]++;
534  else if (cel->subCels[i].best_coding == RoQ_ID_CCC)
535  for (j=0; j<4; j++)
536  tempData->codebooks.usedCB2[cel->subCels[i].subCels[j]]++;
537  }
538 }
539 
540 static void remap_codebooks(RoqContext *enc, RoqTempdata *tempData)
541 {
542  int i, j, idx=0;
543 
544  /* Make remaps for the final codebook usage */
545  for (i=0; i<(enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4); i++) {
546  if (tempData->codebooks.usedCB4[i]) {
547  tempData->i2f4[i] = idx;
548  tempData->f2i4[idx] = i;
549  for (j=0; j<4; j++)
550  tempData->codebooks.usedCB2[enc->cb4x4[i].idx[j]]++;
551  idx++;
552  }
553  }
554 
555  tempData->numCB4 = idx;
556 
557  idx = 0;
558  for (i=0; i<MAX_CBS_2x2; i++) {
559  if (tempData->codebooks.usedCB2[i]) {
560  tempData->i2f2[i] = idx;
561  tempData->f2i2[idx] = i;
562  idx++;
563  }
564  }
565  tempData->numCB2 = idx;
566 
567 }
568 
569 /**
570  * Write codebook chunk
571  */
572 static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
573 {
574  int i, j;
575  uint8_t **outp= &enc->out_buf;
576 
577  if (tempData->numCB2) {
578  bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
579  bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
580  bytestream_put_byte(outp, tempData->numCB4);
581  bytestream_put_byte(outp, tempData->numCB2);
582 
583  for (i=0; i<tempData->numCB2; i++) {
584  bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
585  bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
586  bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
587  }
588 
589  for (i=0; i<tempData->numCB4; i++)
590  for (j=0; j<4; j++)
591  bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);
592 
593  }
594 }
595 
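/* Pack a motion vector into the one-byte FCC argument:
   (8 - dx) in the high nibble, (8 - dy) in the low nibble. */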
596 static inline uint8_t motion_arg(motion_vect mot)
597 {
598  uint8_t ax = 8 - ((uint8_t) mot.d[0]);
599  uint8_t ay = 8 - ((uint8_t) mot.d[1]);
600  return ((ax&15)<<4) | (ay&15);
601 }
602 
603 typedef struct CodingSpool {
604  int typeSpool;
605  int typeSpoolLength;
606  uint8_t argumentSpool[64];
607  uint8_t *args;
608  uint8_t **pout;
609 } CodingSpool;
610 
611 /* NOTE: Typecodes must be spooled AFTER arguments!! */
612 static void write_typecode(CodingSpool *s, uint8_t type)
613 {
614  s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
615  s->typeSpoolLength += 2;
616  if (s->typeSpoolLength == 16) {
617  bytestream_put_le16(s->pout, s->typeSpool);
618  bytestream_put_buffer(s->pout, s->argumentSpool,
619  s->args - s->argumentSpool);
620  s->typeSpoolLength = 0;
621  s->typeSpool = 0;
622  s->args = s->argumentSpool;
623  }
624 }
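/* The spool gathers 2-bit typecodes into a 16-bit word; after eight codes it
   writes the word little-endian followed by the argument bytes queued for
   those codes, which is why arguments must be spooled before their typecode
   (see the NOTE above). */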
625 
626 static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
627 {
628  int i, j, k;
629  int x, y;
630  int subX, subY;
631  int dist=0;
632 
633  roq_qcell *qcell;
634  CelEvaluation *eval;
635 
636  CodingSpool spool;
637 
638  spool.typeSpool=0;
639  spool.typeSpoolLength=0;
640  spool.args = spool.argumentSpool;
641  spool.pout = &enc->out_buf;
642 
643  if (tempData->used_option[RoQ_ID_CCC]%2)
644  tempData->mainChunkSize+=8; //FIXME
645 
646  /* Write the video chunk header */
647  bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
648  bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
649  bytestream_put_byte(&enc->out_buf, 0x0);
650  bytestream_put_byte(&enc->out_buf, 0x0);
651 
652  for (i=0; i<numBlocks; i++) {
653  eval = tempData->cel_evals + i;
654 
655  x = eval->sourceX;
656  y = eval->sourceY;
657  dist += eval->eval_dist[eval->best_coding];
658 
659  switch (eval->best_coding) {
660  case RoQ_ID_MOT:
661  write_typecode(&spool, RoQ_ID_MOT);
662  break;
663 
664  case RoQ_ID_FCC:
665  bytestream_put_byte(&spool.args, motion_arg(eval->motion));
666 
667  write_typecode(&spool, RoQ_ID_FCC);
668  ff_apply_motion_8x8(enc, x, y,
669  eval->motion.d[0], eval->motion.d[1]);
670  break;
671 
672  case RoQ_ID_SLD:
673  bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
674  write_typecode(&spool, RoQ_ID_SLD);
675 
676  qcell = enc->cb4x4 + eval->cbEntry;
677  ff_apply_vector_4x4(enc, x , y , enc->cb2x2 + qcell->idx[0]);
678  ff_apply_vector_4x4(enc, x+4, y , enc->cb2x2 + qcell->idx[1]);
679  ff_apply_vector_4x4(enc, x , y+4, enc->cb2x2 + qcell->idx[2]);
680  ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
681  break;
682 
683  case RoQ_ID_CCC:
684  write_typecode(&spool, RoQ_ID_CCC);
685 
686  for (j=0; j<4; j++) {
687  subX = x + 4*(j&1);
688  subY = y + 2*(j&2);
689 
690  switch(eval->subCels[j].best_coding) {
691  case RoQ_ID_MOT:
692  break;
693 
694  case RoQ_ID_FCC:
695  bytestream_put_byte(&spool.args,
696  motion_arg(eval->subCels[j].motion));
697 
698  ff_apply_motion_4x4(enc, subX, subY,
699  eval->subCels[j].motion.d[0],
700  eval->subCels[j].motion.d[1]);
701  break;
702 
703  case RoQ_ID_SLD:
704  bytestream_put_byte(&spool.args,
705  tempData->i2f4[eval->subCels[j].cbEntry]);
706 
707  qcell = enc->cb4x4 + eval->subCels[j].cbEntry;
708 
709  ff_apply_vector_2x2(enc, subX , subY ,
710  enc->cb2x2 + qcell->idx[0]);
711  ff_apply_vector_2x2(enc, subX+2, subY ,
712  enc->cb2x2 + qcell->idx[1]);
713  ff_apply_vector_2x2(enc, subX , subY+2,
714  enc->cb2x2 + qcell->idx[2]);
715  ff_apply_vector_2x2(enc, subX+2, subY+2,
716  enc->cb2x2 + qcell->idx[3]);
717  break;
718 
719  case RoQ_ID_CCC:
720  for (k=0; k<4; k++) {
721  int cb_idx = eval->subCels[j].subCels[k];
722  bytestream_put_byte(&spool.args,
723  tempData->i2f2[cb_idx]);
724 
725  ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
726  enc->cb2x2 + cb_idx);
727  }
728  break;
729  }
730  write_typecode(&spool, eval->subCels[j].best_coding);
731  }
732  break;
733  }
734  }
735 
736  /* Flush the remainder of the argument/type spool */
737  while (spool.typeSpoolLength)
738  write_typecode(&spool, 0x0);
739 }
740 
741 
742 /**
743  * Create a single YUV cell from a 2x2 section of the image
744  */
745 static inline void frame_block_to_cell(uint8_t *block, uint8_t * const *data,
746  int top, int left, const int *stride)
747 {
748  int i, j, u=0, v=0;
749 
750  for (i=0; i<2; i++)
751  for (j=0; j<2; j++) {
752  int x = (top+i)*stride[0] + left + j;
753  *block++ = data[0][x];
754  x = (top+i)*stride[1] + left + j;
755  u += data[1][x];
756  v += data[2][x];
757  }
758 
759  *block++ = (u+2)/4;
760  *block++ = (v+2)/4;
761 }
762 
763 /**
764  * Create YUV clusters for the entire image
765  */
766 static void create_clusters(const AVFrame *frame, int w, int h, uint8_t *yuvClusters)
767 {
768  int i, j, k, l;
769 
770  for (i=0; i<h; i+=4)
771  for (j=0; j<w; j+=4) {
772  for (k=0; k < 2; k++)
773  for (l=0; l < 2; l++)
774  frame_block_to_cell(yuvClusters + (l + 2*k)*6, frame->data,
775  i+2*k, j+2*l, frame->linesize);
776  yuvClusters += 24;
777  }
778 }
779 
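/* Codebook training: each input vector is size*size/4 consecutive 6-component
   (4 Y + averaged U and V) cells; ELBG is seeded with avpriv_init_elbg() and
   refined with avpriv_do_elbg(), and the resulting codewords are converted
   back to roq_cells with the chroma bias divided out. */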
780 static int generate_codebook(RoqContext *enc, RoqTempdata *tempdata,
781  int *points, int inputCount, roq_cell *results,
782  int size, int cbsize)
783 {
784  int i, j, k, ret = 0;
785  int c_size = size*size/4;
786  int *buf;
787  int *codebook = av_malloc_array(6*c_size, cbsize*sizeof(int));
788  int *closest_cb;
789 
790  if (!codebook)
791  return AVERROR(ENOMEM);
792 
793  if (size == 4) {
794  closest_cb = av_malloc_array(6*c_size, inputCount*sizeof(int));
795  if (!closest_cb) {
796  ret = AVERROR(ENOMEM);
797  goto out;
798  }
799  } else
800  closest_cb = tempdata->closest_cb2;
801 
802  ret = avpriv_init_elbg(points, 6 * c_size, inputCount, codebook,
803  cbsize, 1, closest_cb, &enc->randctx);
804  if (ret < 0)
805  goto out;
806  ret = avpriv_do_elbg(points, 6 * c_size, inputCount, codebook,
807  cbsize, 1, closest_cb, &enc->randctx);
808  if (ret < 0)
809  goto out;
810 
811  buf = codebook;
812  for (i=0; i<cbsize; i++)
813  for (k=0; k<c_size; k++) {
814  for(j=0; j<4; j++)
815  results->y[j] = *buf++;
816 
817  results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
818  results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
819  results++;
820  }
821 out:
822  if (size == 4)
823  av_free(closest_cb);
824  av_free(codebook);
825  return ret;
826 }
827 
828 static int generate_new_codebooks(RoqContext *enc, RoqTempdata *tempData)
829 {
830  int i, j, ret = 0;
831  RoqCodebooks *codebooks = &tempData->codebooks;
832  int max = enc->width*enc->height/16;
833  uint8_t mb2[3*4];
834  roq_cell *results4 = av_malloc(sizeof(roq_cell)*MAX_CBS_4x4*4);
835  uint8_t *yuvClusters=av_malloc_array(max, sizeof(int)*6*4);
836  int *points = av_malloc_array(max, 6*4*sizeof(int));
837  int bias;
838 
839  if (!results4 || !yuvClusters || !points) {
840  ret = AVERROR(ENOMEM);
841  goto out;
842  }
843 
844  /* Subsample YUV data */
845  create_clusters(enc->frame_to_enc, enc->width, enc->height, yuvClusters);
846 
847  /* Cast to integer and apply chroma bias */
848  for (i=0; i<max*24; i++) {
849  bias = ((i%6)<4) ? 1 : CHROMA_BIAS;
850  points[i] = bias*yuvClusters[i];
851  }
852 
853  /* Create 4x4 codebooks */
854  if ((ret = generate_codebook(enc, tempData, points, max,
855  results4, 4, (enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4))) < 0)
856  goto out;
857 
858  codebooks->numCB4 = (enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4);
859 
860  tempData->closest_cb2 = av_malloc_array(max, 4*sizeof(int));
861  if (!tempData->closest_cb2) {
862  ret = AVERROR(ENOMEM);
863  goto out;
864  }
865 
866  /* Create 2x2 codebooks */
867  if ((ret = generate_codebook(enc, tempData, points, max * 4,
868  enc->cb2x2, 2, MAX_CBS_2x2)) < 0)
869  goto out;
870 
871  codebooks->numCB2 = MAX_CBS_2x2;
872 
873  /* Unpack 2x2 codebook clusters */
874  for (i=0; i<codebooks->numCB2; i++)
875  unpack_roq_cell(enc->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);
876 
877  /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
878  for (i=0; i<codebooks->numCB4; i++) {
879  for (j=0; j<4; j++) {
880  unpack_roq_cell(&results4[4*i + j], mb2);
881  index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
882  &enc->cb4x4[i].idx[j], 2);
883  }
884  unpack_roq_qcell(codebooks->unpacked_cb2, enc->cb4x4 + i,
885  codebooks->unpacked_cb4 + i*4*4*3);
886  enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
887  codebooks->unpacked_cb4_enlarged + i*8*8*3);
888  }
889 out:
890  av_free(yuvClusters);
891  av_free(points);
892  av_free(results4);
893  return ret;
894 }
895 
896 static int roq_encode_video(RoqContext *enc)
897 {
898  RoqTempdata *tempData = enc->tmpData;
899  int i, ret;
900 
901  memset(tempData, 0, sizeof(*tempData));
902 
903  ret = create_cel_evals(enc, tempData);
904  if (ret < 0)
905  return ret;
906 
907  ret = generate_new_codebooks(enc, tempData);
908  if (ret < 0)
909  return ret;
910 
911  if (enc->framesSinceKeyframe >= 1) {
912  motion_search(enc, 8);
913  motion_search(enc, 4);
914  }
915 
916  retry_encode:
917  for (i=0; i<enc->width*enc->height/64; i++)
918  gather_data_for_cel(tempData->cel_evals + i, enc, tempData);
919 
920  /* Quake 3 can't handle chunks bigger than 65535 bytes */
921  if (tempData->mainChunkSize/8 > 65535 && enc->quake3_compat) {
922  if (enc->lambda > 100000) {
923  av_log(enc->avctx, AV_LOG_ERROR, "Cannot encode video in Quake compatible form\n");
924  return AVERROR(EINVAL);
925  }
926  av_log(enc->avctx, AV_LOG_ERROR,
927  "Warning, generated a frame too big for Quake (%d > 65535), "
928  "now switching to a bigger qscale value.\n",
929  tempData->mainChunkSize/8);
930  enc->lambda *= 1.5;
931  tempData->mainChunkSize = 0;
932  memset(tempData->used_option, 0, sizeof(tempData->used_option));
933  memset(tempData->codebooks.usedCB4, 0,
934  sizeof(tempData->codebooks.usedCB4));
935  memset(tempData->codebooks.usedCB2, 0,
936  sizeof(tempData->codebooks.usedCB2));
937 
938  goto retry_encode;
939  }
940 
941  remap_codebooks(enc, tempData);
942 
943  write_codebooks(enc, tempData);
944 
945  reconstruct_and_encode_image(enc, tempData, enc->width, enc->height,
946  enc->width*enc->height/64);
947 
948  /* Rotate frame history */
949  FFSWAP(AVFrame *, enc->current_frame, enc->last_frame);
950  FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
951  FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);
952 
953  av_freep(&tempData->cel_evals);
954  av_freep(&tempData->closest_cb2);
955 
956  enc->framesSinceKeyframe++;
957 
958  return 0;
959 }
960 
961 static av_cold int roq_encode_end(AVCodecContext *avctx)
962 {
963  RoqContext *enc = avctx->priv_data;
964 
965  av_frame_free(&enc->current_frame);
966  av_frame_free(&enc->last_frame);
967 
968  av_freep(&enc->tmpData);
969  av_freep(&enc->this_motion4);
970  av_freep(&enc->last_motion4);
971  av_freep(&enc->this_motion8);
972  av_freep(&enc->last_motion8);
973 
974  return 0;
975 }
976 
977 static av_cold int roq_encode_init(AVCodecContext *avctx)
978 {
979  RoqContext *enc = avctx->priv_data;
980 
981  av_lfg_init(&enc->randctx, 1);
982 
983  enc->avctx = avctx;
984 
985  enc->framesSinceKeyframe = 0;
986  if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
987  av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
988  return AVERROR(EINVAL);
989  }
990 
991  if (avctx->width > 65535 || avctx->height > 65535) {
992  av_log(avctx, AV_LOG_ERROR, "Dimensions are max %d\n", enc->quake3_compat ? 32768 : 65535);
993  return AVERROR(EINVAL);
994  }
995 
996  if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
997  av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two, this is not supported by quake\n");
998 
999  enc->width = avctx->width;
1000  enc->height = avctx->height;
1001 
1002  enc->framesSinceKeyframe = 0;
1003  enc->first_frame = 1;
1004 
1005  enc->last_frame = av_frame_alloc();
1006  enc->current_frame = av_frame_alloc();
1007  if (!enc->last_frame || !enc->current_frame) {
1008  roq_encode_end(avctx);
1009  return AVERROR(ENOMEM);
1010  }
1011 
1012  enc->tmpData = av_malloc(sizeof(RoqTempdata));
1013 
1014  enc->this_motion4 =
1015  av_mallocz_array((enc->width*enc->height/16), sizeof(motion_vect));
1016 
1017  enc->last_motion4 =
1018  av_malloc_array ((enc->width*enc->height/16), sizeof(motion_vect));
1019 
1020  enc->this_motion8 =
1021  av_mallocz_array((enc->width*enc->height/64), sizeof(motion_vect));
1022 
1023  enc->last_motion8 =
1024  av_malloc_array ((enc->width*enc->height/64), sizeof(motion_vect));
1025 
1026  if (!enc->tmpData || !enc->this_motion4 || !enc->last_motion4 ||
1027  !enc->this_motion8 || !enc->last_motion8) {
1028  roq_encode_end(avctx);
1029  return AVERROR(ENOMEM);
1030  }
1031 
1032  return 0;
1033 }
1034 
1035 static void roq_write_video_info_chunk(RoqContext *enc)
1036 {
1037  /* ROQ info chunk */
1038  bytestream_put_le16(&enc->out_buf, RoQ_INFO);
1039 
1040  /* Size: 8 bytes */
1041  bytestream_put_le32(&enc->out_buf, 8);
1042 
1043  /* Unused argument */
1044  bytestream_put_byte(&enc->out_buf, 0x00);
1045  bytestream_put_byte(&enc->out_buf, 0x00);
1046 
1047  /* Width */
1048  bytestream_put_le16(&enc->out_buf, enc->width);
1049 
1050  /* Height */
1051  bytestream_put_le16(&enc->out_buf, enc->height);
1052 
1053  /* Unused in Quake 3, mimics the output of the real encoder */
1054  bytestream_put_byte(&enc->out_buf, 0x08);
1055  bytestream_put_byte(&enc->out_buf, 0x00);
1056  bytestream_put_byte(&enc->out_buf, 0x04);
1057  bytestream_put_byte(&enc->out_buf, 0x00);
1058 }
1059 
1060 static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1061  const AVFrame *frame, int *got_packet)
1062 {
1063  RoqContext *enc = avctx->priv_data;
1064  int size, ret;
1065 
1066  enc->avctx = avctx;
1067 
1068  enc->frame_to_enc = frame;
1069 
1070  if (frame->quality)
1071  enc->lambda = frame->quality - 1;
1072  else
1073  enc->lambda = 2*ROQ_LAMBDA_SCALE;
1074 
1075  /* 138 bits max per 8x8 block +
1076  * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
1077  size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
1078  if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0)
1079  return ret;
1080  enc->out_buf = pkt->data;
1081 
1082  /* Check for I-frame */
1083  if (enc->framesSinceKeyframe == avctx->gop_size)
1084  enc->framesSinceKeyframe = 0;
1085 
1086  if (enc->first_frame) {
1087  /* Alloc memory for the reconstruction data (we must know the stride
1088  for that) */
1089  if ((ret = ff_get_buffer(avctx, enc->current_frame, 0)) < 0 ||
1090  (ret = ff_get_buffer(avctx, enc->last_frame, 0)) < 0)
1091  return ret;
1092 
1093  /* Before the first video frame, write a "video info" chunk */
1094  roq_write_video_info_chunk(enc);
1095 
1096  enc->first_frame = 0;
1097  }
1098 
1099  /* Encode the actual frame */
1100  ret = roq_encode_video(enc);
1101  if (ret < 0)
1102  return ret;
1103 
1104  pkt->size = enc->out_buf - pkt->data;
1105  if (enc->framesSinceKeyframe == 1)
1106  pkt->flags |= AV_PKT_FLAG_KEY;
1107  *got_packet = 1;
1108 
1109  return 0;
1110 }
1111 
1112 #define OFFSET(x) offsetof(RoqContext, x)
1113 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1114 static const AVOption options[] = {
1115  { "quake3_compat", "Whether to respect known limitations in Quake 3 decoder", OFFSET(quake3_compat), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
1116  { NULL },
1117 };
1118 
1119 static const AVClass roq_class = {
1120  .class_name = "RoQ",
1121  .item_name = av_default_item_name,
1122  .option = options,
1123  .version = LIBAVUTIL_VERSION_INT,
1124 };
1125 
1126 AVCodec ff_roq_encoder = {
1127  .name = "roqvideo",
1128  .long_name = NULL_IF_CONFIG_SMALL("id RoQ video"),
1129  .type = AVMEDIA_TYPE_VIDEO,
1130  .id = AV_CODEC_ID_ROQ,
1131  .priv_data_size = sizeof(RoqContext),
1132  .init = roq_encode_init,
1133  .encode2 = roq_encode_frame,
1134  .close = roq_encode_end,
1135  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ444P,
1136  AV_PIX_FMT_NONE },
1137  .priv_class = &roq_class,
1138 };