/*
 * QPEG codec
 * Copyright (c) 2004 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * QPEG codec.
 */

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

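/* Decoder state, described here as inferred from how the fields are used in
 * this file: avctx is kept for logging, pic holds the frame currently being
 * decoded, ref holds the previously decoded frame used as the prediction
 * source for inter frames, pal caches the current 256-entry RGB32 palette,
 * and buffer is the bytestream reader positioned inside the current packet. */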
typedef struct QpegContext{
    AVCodecContext *avctx;
    AVFrame *pic, *ref;
    uint32_t pal[256];
    GetByteContext buffer;
} QpegContext;

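/* Intra frames are plain RLE over the palettized image, decoded bottom-up
 * (dst starts at the last row and moves up by one stride per completed row).
 * Opcode layout, as handled below (run lengths are biased by +2, copy
 * lengths by +1):
 *   0xFC        end of picture
 *   0xF8..0xFB  very long run, 3+16 extra length bits, value byte follows
 *   0xF0..0xF7  long run, 4+8 extra length bits, value byte follows
 *   0xE0..0xEF  short run, length from the low bits of the code, value byte follows
 *   0xC0..0xDF  very long copy, 6+16 extra length bits of literals
 *   0x80..0xBF  long copy, 7+8 extra length bits of literals
 *   0x00..0x7F  short copy, the code itself is the literal count
 */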
static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst,
                              int stride, int width, int height)
{
    int i;
    int code;
    int c0, c1;
    int run, copy;
    int filled = 0;
    int rows_to_go;

    rows_to_go = height;
    height--;
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (rows_to_go > 0)) {
        code = bytestream2_get_byte(&qctx->buffer);
        run = copy = 0;
        if(code == 0xFC) /* end-of-picture code */
            break;
        if(code >= 0xF8) { /* very long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0x7) << 16) + (c0 << 8) + c1 + 2;
        } else if (code >= 0xF0) { /* long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0xF) << 8) + c0 + 2;
        } else if (code >= 0xE0) { /* short run */
            run = (code & 0x1F) + 2;
        } else if (code >= 0xC0) { /* very long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x3F) << 16) + (c0 << 8) + c1 + 1;
        } else if (code >= 0x80) { /* long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x7F) << 8) + c0 + 1;
        } else { /* short copy */
            copy = code + 1;
        }

        /* perform actual run or copy */
        if(run) {
            int p;

            p = bytestream2_get_byte(&qctx->buffer);
            for(i = 0; i < run; i++) {
                int step = FFMIN(run - i, width - filled);
                memset(dst+filled, p, step);
                filled += step;
                i      += step - 1;
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    rows_to_go--;
                    while (run - i > width && rows_to_go > 0) {
                        memset(dst, p, width);
                        dst -= stride;
                        rows_to_go--;
                        i += width;
                    }
                    if(rows_to_go <= 0)
                        break;
                }
            }
        } else {
            if (bytestream2_get_bytes_left(&qctx->buffer) < copy)
                copy = bytestream2_get_bytes_left(&qctx->buffer);
            for(i = 0; i < copy; i++) {
                dst[filled++] = bytestream2_get_byte(&qctx->buffer);
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    rows_to_go--;
                    if(rows_to_go <= 0)
                        break;
                }
            }
        }
    }
}

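/* Motion-compensated blocks in inter frames come in 16 predefined sizes;
 * the low nibble of a 0xFn opcode indexes these height/width tables. */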
static const int qpeg_table_h[16] =
 { 0x00, 0x20, 0x20, 0x20, 0x18, 0x10, 0x10, 0x20, 0x10, 0x08, 0x18, 0x08, 0x08, 0x18, 0x10, 0x04};
static const int qpeg_table_w[16] =
 { 0x00, 0x20, 0x18, 0x08, 0x18, 0x10, 0x20, 0x10, 0x08, 0x10, 0x20, 0x20, 0x08, 0x10, 0x18, 0x04};

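/* Inter-frame opcode layout, as handled in the loop below:
 *   0xF0..0xFF  when delta is nonzero: motion-compensated block(s), repeated
 *               until a code with a different high nibble follows
 *   0xE0        end of picture
 *   0xE1..0xFF  run of one palette index (low 5 bits of the code, plus one)
 *   0xC0..0xDF  copy of literal palette indices (low 5 bits, plus one)
 *   0x80..0xBF  skip pixels kept from the reference frame (0x80 and 0x81
 *               escape to a longer skip stored in the next byte)
 *   0x01..0x7F  a single pixel looked up in the per-frame code table
 *   0x00        a single-pixel skip
 */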
/* Decodes delta frames */
static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
                                          int stride, int width, int height,
                                          int delta, const uint8_t *ctable,
                                          uint8_t *refdata)
{
    int i, j;
    int code;
    int filled = 0;
    int orig_height;

    if (refdata) {
        /* copy prev frame */
        for (i = 0; i < height; i++)
            memcpy(dst + (i * stride), refdata + (i * stride), width);
    } else {
        refdata = dst;
    }

    orig_height = height;
    height--;
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (height >= 0)) {
        code = bytestream2_get_byte(&qctx->buffer);

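        /* Motion-compensation opcodes, as decoded below: the low nibble
         * selects the block size from qpeg_table_w/h, and the next byte
         * packs two signed 4-bit components, dx in the high nibble and dy
         * in the low nibble, applied relative to the reference frame.
         * Several such opcodes may appear back to back. */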
        if(delta) {
            /* motion compensation */
            while(bytestream2_get_bytes_left(&qctx->buffer) > 0 && (code & 0xF0) == 0xF0) {
                if(delta == 1) {
                    int me_idx;
                    int me_w, me_h, me_x, me_y;
                    uint8_t *me_plane;
                    int corr, val;

                    /* get block size by index */
                    me_idx = code & 0xF;
                    me_w = qpeg_table_w[me_idx];
                    me_h = qpeg_table_h[me_idx];

                    /* extract motion vector */
                    corr = bytestream2_get_byte(&qctx->buffer);

                    val = corr >> 4;
                    if(val > 7)
                        val -= 16;
                    me_x = val;

                    val = corr & 0xF;
                    if(val > 7)
                        val -= 16;
                    me_y = val;

                    /* check motion vector */
                    if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
                        (height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
                        (filled + me_w > width) || (height - me_h < 0))
                        av_log(qctx->avctx, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
                               me_x, me_y, me_w, me_h, filled, height);
                    else {
                        /* do motion compensation */
                        me_plane = refdata + (filled + me_x) + (height - me_y) * stride;
                        for(j = 0; j < me_h; j++) {
                            for(i = 0; i < me_w; i++)
                                dst[filled + i - (j * stride)] = me_plane[i - (j * stride)];
                        }
                    }
                }
                code = bytestream2_get_byte(&qctx->buffer);
            }
        }

        if(code == 0xE0) /* end-of-picture code */
            break;
        if(code > 0xE0) { /* run code: 0xE1..0xFF */
            int p;

            code &= 0x1F;
            p = bytestream2_get_byte(&qctx->buffer);
            for(i = 0; i <= code; i++) {
                dst[filled++] = p;
                if(filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */
            code &= 0x1F;

            if(code + 1 > bytestream2_get_bytes_left(&qctx->buffer))
                break;

            for(i = 0; i <= code; i++) {
                dst[filled++] = bytestream2_get_byte(&qctx->buffer);
                if(filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if(code >= 0x80) { /* skip code: 0x80..0xBF */
            int skip;

            code &= 0x3F;
            /* codes 0x80 and 0x81 are actually escape codes,
               skip value minus constant is in the next byte */
            if(!code)
                skip = bytestream2_get_byte(&qctx->buffer) + 64;
            else if(code == 1)
                skip = bytestream2_get_byte(&qctx->buffer) + 320;
            else
                skip = code;
            filled += skip;
            while(filled >= width) {
                filled -= width;
                dst -= stride;
                height--;
                if(height < 0)
                    break;
            }
        } else {
            /* zero code treated as one-pixel skip */
            if(code) {
                dst[filled++] = ctable[code & 0x7F];
            }
            else
                filled++;
            if(filled >= width) {
                filled = 0;
                dst -= stride;
                height--;
            }
        }
    }
}

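/* Per-packet entry point: the previously decoded frame is moved into ref,
 * a fresh buffer is obtained for pic, and the packet is decoded either as
 * an intra frame (mode byte 0x10) or as an inter frame predicted from ref.
 * The output palette comes from packet side data when present, otherwise
 * from the palette cached out of extradata. */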
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    uint8_t ctable[128];
    QpegContext * const a = avctx->priv_data;
    AVFrame * const p = a->pic;
    AVFrame * const ref = a->ref;
    uint8_t* outdata;
    int delta, ret;
    int pal_size;
    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &pal_size);

    if (avpkt->size < 0x86) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&a->buffer, avpkt->data, avpkt->size);

    av_frame_unref(ref);
    av_frame_move_ref(ref, p);

    if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    outdata = p->data[0];
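
    /* Packet layout implied by the parsing below: a 4-byte header that is
     * skipped, a 128-byte code table, one more skipped byte, then the mode
     * byte (0x10 means intra, anything else selects inter decoding with
     * that value as "delta"). That header is 134 bytes, which is where the
     * 0x86 minimum packet size checked above comes from. */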
    bytestream2_skip(&a->buffer, 4);
    bytestream2_get_buffer(&a->buffer, ctable, 128);
    bytestream2_skip(&a->buffer, 1);

    delta = bytestream2_get_byte(&a->buffer);
    if(delta == 0x10) {
        qpeg_decode_intra(a, outdata, p->linesize[0], avctx->width, avctx->height);
    } else {
        qpeg_decode_inter(a, outdata, p->linesize[0], avctx->width, avctx->height, delta, ctable, ref->data[0]);
    }

    /* make the palette available on the way out */
    if (pal && pal_size == AVPALETTE_SIZE) {
        p->palette_has_changed = 1;
        memcpy(a->pal, pal, AVPALETTE_SIZE);
    } else if (pal) {
        av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", pal_size);
    }
    memcpy(p->data[1], a->pal, AVPALETTE_SIZE);

    if ((ret = av_frame_ref(data, p)) < 0)
        return ret;

    *got_frame = 1;

    return avpkt->size;
}

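/* decode_flush doubles as the palette (re)initializer: it is called from
 * decode_init and registered as the flush callback, and it assumes the
 * demuxer stores the palette at the end of extradata, reading up to 1024
 * bytes (256 RGB32 entries) from there and forcing the alpha byte to 0xFF. */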
static void decode_flush(AVCodecContext *avctx){
    QpegContext * const a = avctx->priv_data;
    int i, pal_size;
    const uint8_t *pal_src;

    pal_size = FFMIN(1024U, avctx->extradata_size);
    pal_src = avctx->extradata + avctx->extradata_size - pal_size;

    for (i=0; i<pal_size/4; i++)
        a->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i);
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    QpegContext * const a = avctx->priv_data;

    av_frame_free(&a->pic);
    av_frame_free(&a->ref);

    return 0;
}

static av_cold int decode_init(AVCodecContext *avctx){
    QpegContext * const a = avctx->priv_data;

    a->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    decode_flush(avctx);

    a->pic = av_frame_alloc();
    a->ref = av_frame_alloc();
    if (!a->pic || !a->ref) {
        decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}

AVCodec ff_qpeg_decoder = {
    .name           = "qpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Q-team QPEG"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_QPEG,
    .priv_data_size = sizeof(QpegContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
};
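
/* A minimal usage sketch (not part of this file's API surface): the decoder
 * is reached through the ordinary libavcodec calls, assuming the caller
 * already has a demuxed QPEG packet plus the codec parameters (dimensions
 * and palette-bearing extradata) from the container:
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_QPEG);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     avcodec_parameters_to_context(ctx, stream->codecpar);
 *     avcodec_open2(ctx, codec, NULL);
 *
 *     avcodec_send_packet(ctx, pkt);                // one demuxed packet
 *     while (avcodec_receive_frame(ctx, frame) == 0) {
 *         // frame->data[0] holds PAL8 indices, frame->data[1] the palette
 *     }
 *
 * Error handling is omitted; "stream", "pkt" and "frame" are assumed to come
 * from the usual libavformat/libavutil setup. */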