msvideo1.c
/*
 * Microsoft Video-1 Decoder
 * Copyright (C) 2003 The FFmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Video-1 Decoder by Mike Melanson (melanson@pcisys.net)
 * For more information about the MS Video-1 format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "decode.h"
#include "internal.h"

#define PALETTE_COUNT 256
#define CHECK_STREAM_PTR(n) \
    if ((stream_ptr + n) > s->size) { \
        av_log(s->avctx, AV_LOG_ERROR, "MS Video-1 warning: stream_ptr out of bounds (%d >= %d)\n", \
               stream_ptr + n, s->size); \
        return; \
    }

typedef struct Msvideo1Context {

    AVCodecContext *avctx;
    AVFrame *frame;

    const unsigned char *buf;
    int size;

    int mode_8bit;  /* if it's not 8-bit, it's 16-bit */

    uint32_t pal[256];

} Msvideo1Context;

static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
{
    Msvideo1Context *s = avctx->priv_data;

    s->avctx = avctx;

    if (avctx->width < 4 || avctx->height < 4)
        return AVERROR_INVALIDDATA;

    /* figure out the colorspace based on the presence of a palette */
    if (s->avctx->bits_per_coded_sample == 8) {
        s->mode_8bit = 1;
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
        if (avctx->extradata_size >= AVPALETTE_SIZE)
            memcpy(s->pal, avctx->extradata, AVPALETTE_SIZE);
    } else {
        s->mode_8bit = 0;
        avctx->pix_fmt = AV_PIX_FMT_RGB555;
    }

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    return 0;
}

static void msvideo1_decode_8bit(Msvideo1Context *s)
{
    int block_ptr, pixel_ptr;
    int total_blocks;
    int pixel_x, pixel_y;          /* pixel width and height iterators */
    int block_x, block_y;          /* block width and height iterators */
    int blocks_wide, blocks_high;  /* width and height in 4x4 blocks */
    int block_inc;
    int row_dec;

    /* decoding parameters */
    int stream_ptr;
    unsigned char byte_a, byte_b;
    unsigned short flags;
    int skip_blocks;
    unsigned char colors[8];
    unsigned char *pixels = s->frame->data[0];
    int stride = s->frame->linesize[0];

    stream_ptr  = 0;
    skip_blocks = 0;
    blocks_wide = s->avctx->width / 4;
    blocks_high = s->avctx->height / 4;
    total_blocks = blocks_wide * blocks_high;
    block_inc = 4;
    row_dec = stride + 4;

    for (block_y = blocks_high; block_y > 0; block_y--) {
        block_ptr = ((block_y * 4) - 1) * stride;
        for (block_x = blocks_wide; block_x > 0; block_x--) {
            /* check if this block should be skipped */
            if (skip_blocks) {
                block_ptr += block_inc;
                skip_blocks--;
                total_blocks--;
                continue;
            }

            pixel_ptr = block_ptr;

            /* get the next two bytes in the encoded data stream */
            CHECK_STREAM_PTR(2);
            byte_a = s->buf[stream_ptr++];
            byte_b = s->buf[stream_ptr++];

            /* check if the decode is finished */
            if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0))
                return;
            else if ((byte_b & 0xFC) == 0x84) {
                /* skip code, but don't count the current block */
                skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1;
            } else if (byte_b < 0x80) {
                /* 2-color encoding */
                flags = (byte_b << 8) | byte_a;

                CHECK_STREAM_PTR(2);
                colors[0] = s->buf[stream_ptr++];
                colors[1] = s->buf[stream_ptr++];

                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
                        pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1];
                    pixel_ptr -= row_dec;
                }
            } else if (byte_b >= 0x90) {
                /* 8-color encoding */
                flags = (byte_b << 8) | byte_a;

                CHECK_STREAM_PTR(8);
                memcpy(colors, &s->buf[stream_ptr], 8);
                stream_ptr += 8;

                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
                        pixels[pixel_ptr++] =
                            colors[((pixel_y & 0x2) << 1) +
                                   (pixel_x & 0x2) + ((flags & 0x1) ^ 1)];
                    pixel_ptr -= row_dec;
                }
            } else {
                /* 1-color encoding */
                colors[0] = byte_a;

                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++)
                        pixels[pixel_ptr++] = colors[0];
                    pixel_ptr -= row_dec;
                }
            }

            block_ptr += block_inc;
            total_blocks--;
        }
    }

    /* make the palette available on the way out */
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
}

static void msvideo1_decode_16bit(Msvideo1Context *s)
{
    int block_ptr, pixel_ptr;
    int total_blocks;
    int pixel_x, pixel_y;          /* pixel width and height iterators */
    int block_x, block_y;          /* block width and height iterators */
    int blocks_wide, blocks_high;  /* width and height in 4x4 blocks */
    int block_inc;
    int row_dec;

    /* decoding parameters */
    int stream_ptr;
    unsigned char byte_a, byte_b;
    unsigned short flags;
    int skip_blocks;
    unsigned short colors[8];
    unsigned short *pixels = (unsigned short *)s->frame->data[0];
    int stride = s->frame->linesize[0] / 2;

    stream_ptr  = 0;
    skip_blocks = 0;
    blocks_wide = s->avctx->width / 4;
    blocks_high = s->avctx->height / 4;
    total_blocks = blocks_wide * blocks_high;
    block_inc = 4;
    row_dec = stride + 4;

    for (block_y = blocks_high; block_y > 0; block_y--) {
        block_ptr = ((block_y * 4) - 1) * stride;
        for (block_x = blocks_wide; block_x > 0; block_x--) {
            /* check if this block should be skipped */
            if (skip_blocks) {
                block_ptr += block_inc;
                skip_blocks--;
                total_blocks--;
                continue;
            }

            pixel_ptr = block_ptr;

            /* get the next two bytes in the encoded data stream */
            CHECK_STREAM_PTR(2);
            byte_a = s->buf[stream_ptr++];
            byte_b = s->buf[stream_ptr++];

            /* check if the decode is finished */
            if ((byte_a == 0) && (byte_b == 0) && (total_blocks == 0)) {
                return;
            } else if ((byte_b & 0xFC) == 0x84) {
                /* skip code, but don't count the current block */
                skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1;
            } else if (byte_b < 0x80) {
                /* 2- or 8-color encoding modes */
                flags = (byte_b << 8) | byte_a;

                CHECK_STREAM_PTR(4);
                colors[0] = AV_RL16(&s->buf[stream_ptr]);
                stream_ptr += 2;
                colors[1] = AV_RL16(&s->buf[stream_ptr]);
                stream_ptr += 2;

                if (colors[0] & 0x8000) {
                    /* 8-color encoding */
                    CHECK_STREAM_PTR(12);
                    colors[2] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;
                    colors[3] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;
                    colors[4] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;
                    colors[5] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;
                    colors[6] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;
                    colors[7] = AV_RL16(&s->buf[stream_ptr]);
                    stream_ptr += 2;

                    for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                        for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
                            pixels[pixel_ptr++] =
                                colors[((pixel_y & 0x2) << 1) +
                                       (pixel_x & 0x2) + ((flags & 0x1) ^ 1)];
                        pixel_ptr -= row_dec;
                    }
                } else {
                    /* 2-color encoding */
                    for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                        for (pixel_x = 0; pixel_x < 4; pixel_x++, flags >>= 1)
                            pixels[pixel_ptr++] = colors[(flags & 0x1) ^ 1];
                        pixel_ptr -= row_dec;
                    }
                }
            } else {
                /* otherwise, it's a 1-color block */
                colors[0] = (byte_b << 8) | byte_a;

                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++)
                        pixels[pixel_ptr++] = colors[0];
                    pixel_ptr -= row_dec;
                }
            }

            block_ptr += block_inc;
            total_blocks--;
        }
    }
}

static int msvideo1_decode_frame(AVCodecContext *avctx,
                                 void *data, int *got_frame,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Msvideo1Context *s = avctx->priv_data;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    // Discard the frame if it is smaller than the minimum frame size
    if (buf_size < (avctx->width / 4) * (avctx->height / 4) / 512) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    if (s->mode_8bit) {
        s->frame->palette_has_changed = ff_copy_palette(s->pal, avpkt, avctx);
    }

    if (s->mode_8bit)
        msvideo1_decode_8bit(s);
    else
        msvideo1_decode_16bit(s);

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int msvideo1_decode_end(AVCodecContext *avctx)
{
    Msvideo1Context *s = avctx->priv_data;

    av_frame_free(&s->frame);

    return 0;
}

AVCodec ff_msvideo1_decoder = {
    .name           = "msvideo1",
    .long_name      = NULL_IF_CONFIG_SMALL("Microsoft Video 1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSVIDEO1,
    .priv_data_size = sizeof(Msvideo1Context),
    .init           = msvideo1_decode_init,
    .close          = msvideo1_decode_end,
    .decode         = msvideo1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};
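
As a usage illustration (not part of msvideo1.c), the sketch below shows how this decoder could be driven through libavcodec's generic send/receive API. The helper name decode_one_msvideo1_packet and its parameters are hypothetical; in practice the width, height, bits_per_coded_sample, the palette in extradata for 8-bit streams, and the packet payload would all come from a demuxer (for example the AVI demuxer), and the input buffer is assumed to carry the usual AV_INPUT_BUFFER_PADDING_SIZE bytes of zero padding.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode a single MS Video-1 chunk into `out`.
 * `data`/`size` is the raw chunk payload (assumed to be padded as
 * libavcodec requires); width/height/bpp mirror the stream header. */
static int decode_one_msvideo1_packet(const uint8_t *data, int size,
                                      int width, int height, int bpp,
                                      AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MSVIDEO1);
    AVCodecContext *ctx;
    AVPacket *pkt;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    ctx = avcodec_alloc_context3(codec);
    pkt = av_packet_alloc();
    if (!ctx || !pkt) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ctx->width                 = width;
    ctx->height                = height;
    ctx->bits_per_coded_sample = bpp;  /* 8 selects PAL8 output, otherwise RGB555 */

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    /* wrap the chunk in a non-refcounted packet and run it through the decoder */
    pkt->data = (uint8_t *)data;
    pkt->size = size;

    if ((ret = avcodec_send_packet(ctx, pkt)) >= 0)
        ret = avcodec_receive_frame(ctx, out);

end:
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}

For paletted (8-bit) content, ctx->extradata would additionally need to be filled with an AVPALETTE_SIZE palette before avcodec_open2(), mirroring what msvideo1_decode_init() reads; per-packet palette updates arrive as packet side data and are picked up by ff_copy_palette() in msvideo1_decode_frame().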