/* libavcodec/photocd.c (fragments): Kodak Photo CD (Image Pac) decoder. */
#define CACHED_BITSTREAM_READER !ARCH_X86_32
/* interp_lowres(): read the low-resolution planes and double them
 * horizontally; each decoded byte is followed by its rounded average with
 * the next byte in the stream.  Per pair of output rows, two full-width
 * (luma) loops and two half-width (chroma) loops run.  Elided source lines
 * are marked with "...". */
uint8_t *ptr, *ptr1, *ptr2;
/* ... */
ptr  = picture->data[0];
ptr1 = picture->data[1];
ptr2 = picture->data[2];
/* ... */
for (int y = 0; y < height; y += 2) {
    /* ... */
    for (int x = 0; x < width - 1; x++) {
        fill = bytestream2_get_byte(gb);
        /* ... */
        *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
    }
    fill = bytestream2_get_byte(gb);
    /* ... */
    for (int x = 0; x < width - 1; x++) {
        fill = bytestream2_get_byte(gb);
        /* ... */
        *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
    }
    fill = bytestream2_get_byte(gb);
    /* ... */
    for (int x = 0; x < (width >> 1) - 1; x++) {
        fill = bytestream2_get_byte(gb);
        /* ... */
        *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
    }
    fill = bytestream2_get_byte(gb);
    /* ... */
    for (int x = 0; x < (width >> 1) - 1; x++) {
        fill = bytestream2_get_byte(gb);
        /* ... */
        *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
    }
    fill = bytestream2_get_byte(gb);
    /* ... */
}
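The horizontal doubling above reduces to a simple pattern: emit each input sample, then its rounded average with the next one, and treat the final byte of the row separately. A minimal standalone sketch of that pattern on a plain byte buffer (no FFmpeg types; expand_row and the sample values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Expand one row of in_w samples to 2*in_w samples: even outputs copy the
 * input, odd outputs are the rounded average with the next input sample.
 * The last input byte is simply duplicated here; the decoder's handling of
 * the final column is elided in the fragment above. */
static void expand_row(const uint8_t *src, uint8_t *dst, int in_w)
{
    for (int x = 0; x < in_w - 1; x++) {
        *dst++ = src[x];
        *dst++ = (src[x] + src[x + 1] + 1) >> 1;
    }
    *dst++ = src[in_w - 1];
    *dst++ = src[in_w - 1];
}

int main(void)
{
    const uint8_t row[4] = { 10, 20, 40, 200 };
    uint8_t out[8];

    expand_row(row, out, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", out[i]);
    printf("\n");   /* prints: 10 15 20 30 40 120 200 200 */
    return 0;
}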
/* interp_lines(): fill in the missing lines between pairs of existing
 * lines; samples directly between two known samples get their rounded
 * mean, the others the rounded mean of four. */
for (int y = 0; y < height - 2; y += 2) {
    const uint8_t *src1 = ptr;
    uint8_t *dst = ptr + linesize;
    const uint8_t *src2 = dst + linesize;

    for (x = 0; x < width - 2; x += 2) {
        dst[x]     = (src1[x] + src2[x] + 1) >> 1;
        dst[x + 1] = (src1[x] + src2[x] + src1[x + 2] + src2[x + 2] + 2) >> 2;
    }
    /* last two columns: no samples further right to average with */
    dst[x] = dst[x + 1] = (src1[x] + src2[x] + 1) >> 1;

    ptr += linesize << 1;
}
/* bottom line: only the row above is available */
/* ... */
dst = ptr + linesize;
for (x = 0; x < width - 2; x += 2) {
    /* ... */
    dst[x + 1] = (src1[x] + src1[x + 2] + 1) >> 1;
}
dst[x] = dst[x + 1] = src1[x];
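Between two decoded rows, the missing row is reconstructed columnwise from the rows above and below. A small standalone illustration of that filter (plain arrays; fill_line and the test values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Fill dst (the missing line) from the line above (top) and below (bot).
 * Even columns average two vertical neighbours, odd columns average the
 * four surrounding known samples, mirroring the loop in interp_lines(). */
static void fill_line(const uint8_t *top, const uint8_t *bot,
                      uint8_t *dst, int width)
{
    int x;

    for (x = 0; x < width - 2; x += 2) {
        dst[x]     = (top[x] + bot[x] + 1) >> 1;
        dst[x + 1] = (top[x] + bot[x] + top[x + 2] + bot[x + 2] + 2) >> 2;
    }
    /* last two columns: no samples further right to average with */
    dst[x] = dst[x + 1] = (top[x] + bot[x] + 1) >> 1;
}

int main(void)
{
    const uint8_t top[6] = { 0, 0, 100, 100, 200, 200 };
    const uint8_t bot[6] = { 40, 40, 60, 60, 80, 80 };
    uint8_t mid[6];

    fill_line(top, bot, mid, 6);
    for (int i = 0; i < 6; i++)
        printf("%d ", mid[i]);
    printf("\n");   /* prints: 20 50 80 110 140 140 */
    return 0;
}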
/* interp_pixels(): upscale a plane in place; output row y takes its
 * samples from row y >> 1, doubling them horizontally.  The loops run
 * bottom-up and right-to-left so no source sample is overwritten before
 * it has been read. */
for (int y = height - 2; y >= 0; y -= 2) {
    const uint8_t *src = ptr + (y >> 1) * linesize;
    uint8_t *dst = ptr + y * linesize;
    /* ... */
    for (int x = width - 4; x >= 0; x -= 2) {
        dst[x]     = src[x >> 1];
        dst[x + 1] = (src[x >> 1] + src[(x >> 1) + 1] + 1) >> 1;
    }
    /* ... */
}
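Because the upscale writes into the same buffer it reads from, it has to walk backwards; a forward pass would clobber src[x >> 1] before reading it. A standalone 1-D sketch of the same in-place trick (expand_in_place and the test values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* buf holds n valid samples in buf[0..n-1] and has room for 2*n samples.
 * Expand it in place: walking right-to-left guarantees that buf[x >> 1]
 * has not yet been overwritten when it is read, as in interp_pixels(). */
static void expand_in_place(uint8_t *buf, int n)
{
    for (int x = 2 * n - 2; x >= 0; x -= 2) {
        int next = (x >> 1) + 1 < n ? buf[(x >> 1) + 1] : buf[x >> 1];

        buf[x + 1] = (buf[x >> 1] + next + 1) >> 1;
        buf[x]     = buf[x >> 1];
    }
}

int main(void)
{
    uint8_t buf[8] = { 8, 16, 32, 64 };  /* 4 valid samples, room for 8 */

    expand_in_place(buf, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", buf[i]);
    printf("\n");   /* prints: 8 12 16 24 32 48 64 64 */
    return 0;
}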
/* read_hufftable(): parse one Huffman table; each entry is a code length,
 * a 16-bit code and a symbol, collected into the bits/codes/syms arrays
 * and then handed to ff_init_vlc_sparse(). */
int start = s->streampos;
/* ... */
count = bytestream2_get_byte(gb) + 1;
/* ... */
for (int j = 0; j < count; j++) {
    const int bit  = bytestream2_get_byteu(gb) + 1;
    const int code = bytestream2_get_be16u(gb);
    const int sym  = bytestream2_get_byteu(gb);
    /* ... */
}
/* ... tail of the ff_init_vlc_sparse() call: */
                         s->bits,  sizeof(*s->bits),  sizeof(*s->bits),
                         s->codes, sizeof(*s->codes), sizeof(*s->codes),
                         s->syms,  sizeof(*s->syms),  sizeof(*s->syms), 0);
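On the wire the table is a count byte (stored as count - 1) followed, after any header bytes elided above, by count records of a length byte (also stored minus one), a big-endian 16-bit code and a symbol byte; those three parallel arrays are what the VLC builder consumes. A standalone parser for that record layout, using plain pointer reads instead of the bytestream2 helpers (parse_hufftable and the sample table are hypothetical, and the sketch assumes the records directly follow the count byte):

#include <stdint.h>
#include <stdio.h>

/* Parse (length - 1, big-endian code, symbol) records into parallel arrays.
 * Returns the number of entries parsed, or -1 if the buffer is too short. */
static int parse_hufftable(const uint8_t *buf, int size,
                           uint8_t *bits, uint16_t *codes, uint8_t *syms)
{
    int count;

    if (size < 1)
        return -1;
    count = buf[0] + 1;
    if (size < 1 + 4 * count)
        return -1;

    buf++;
    for (int j = 0; j < count; j++) {
        bits[j]  = buf[0] + 1;              /* code length in bits */
        codes[j] = (buf[1] << 8) | buf[2];  /* 16-bit code word    */
        syms[j]  = buf[3];                  /* decoded symbol      */
        buf += 4;
    }
    return count;
}

int main(void)
{
    /* 2 entries: (1 bit, code 0x0000, sym 0x10) and (2 bits, code 0x8000, sym 0x20) */
    const uint8_t table[] = { 0x01, 0x00, 0x00, 0x00, 0x10,
                                    0x01, 0x80, 0x00, 0x20 };
    uint8_t  bits[2], syms[2];
    uint16_t codes[2];
    int n = parse_hufftable(table, sizeof(table), bits, codes, syms);

    for (int j = 0; j < n; j++)
        printf("len %d code 0x%04x sym 0x%02x\n", bits[j], codes[j], syms[j]);
    return 0;
}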
/* decode_huff(): Huffman-decode the correction layers that refine the
 * picture from curr_res up to target_res. */
static av_noinline int decode_huff(AVCodecContext *avctx, AVFrame *frame,
                                   int target_res, int curr_res)
{
    /* ... */
    int start = s->streampos;
    /* ... */
    const int scaling = target_res - curr_res;
    const uint8_t type2idx[] = { 0, 0xff, 1, 2 };
    /* ... */
    /* resynchronize on the 24-bit sequence start marker */
    while (shiftreg != 0xfffffe) {
        /* ... */
    }
    /* ... */
    idx = type2idx[type];
    /* ... */
    x2 = avctx->width >> (scaling + !!idx);
    for (int x = 0; x < x2; x++) {
        /* ... */
        m = get_vlc2(&g, s->vlc[idx].table, s->vlc[idx].bits, 2);
        /* ... */
    }
    /* ... */
    s->streampos = (s->streampos + 0x6000 + 2047) & ~0x7ff;
    /* ... */
}
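The while (shiftreg != 0xfffffe) loop is a classic bitstream resync: keep a shift register of the most recent 24 bits and advance until it equals the sequence start marker. A standalone, byte-oriented sketch of the same idea (sync_to_marker and the sample data are hypothetical; no GetBitContext):

#include <stdint.h>
#include <stdio.h>

/* Scan buf for the 24-bit marker 0xfffffe on a byte boundary, keeping the
 * last three bytes in a shift register, and return the offset just past it
 * (or -1 if it is not found). */
static int sync_to_marker(const uint8_t *buf, int size)
{
    uint32_t shiftreg = 0xffffff;   /* anything != the marker */

    for (int i = 0; i < size; i++) {
        shiftreg = ((shiftreg << 8) | buf[i]) & 0xffffff;
        if (shiftreg == 0xfffffe)
            return i + 1;
    }
    return -1;
}

int main(void)
{
    const uint8_t data[] = { 0x12, 0x34, 0xff, 0xff, 0xfe, 0xab };

    printf("payload starts at offset %d\n", sync_to_marker(data, sizeof(data)));
    return 0;
}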
/* photocd_decode_frame(): check the Image Pac signatures, pick the output
 * resolution, then decode and upsample the planes. */
const uint8_t *buf = avpkt->data;
/* ... */
uint8_t *ptr, *ptr1, *ptr2;
/* ... */
if (!memcmp("PCD_OPA", buf, 7)) {
    /* ... av_log() warning ending with: */
           "reading first thumbnail only\n");
} else if (avpkt->size < 786432) {
    /* ... */
} else if (memcmp("PCD_IPI", buf + 0x800, 7)) {
    /* ... */
}
/* ... */
s->orientation = s->thumbnails ? buf[12] & 3 : buf[0x48] & 3;
/* ... */
if (/* ... */)
    /* ... */
else if (avpkt->size <= 788480)
    /* ... */
else
    s->resolution = av_clip(4 - s->lowres, 0, 4);
/* ... */
if (s->resolution < 3) {
    /* ... */
    for (int y = 0; y < avctx->height; y += 2) {
        /* ... */
    }
    /* ... */
}
/* ... */
if (s->resolution == 4) {
    /* ... */
    s->streampos = 0xc2000;
    for (int n = 0; n < 3; n++) {
        /* ... */
    }
    s->streampos = (s->streampos + 2047) & ~0x3ff;
    /* ... */
}
/* ... */
if (s->resolution == 4) {
    /* ... */
    for (int n = 0; n < 3; n++) {
        /* ... */
    }
    s->streampos = (s->streampos + 2047) & ~0x3ff;
    /* ... */
}
/* ... */
for (int y = 0; y < avctx->height >> 1; y++) {
    for (int x = 0; x < avctx->width >> 1; x++) {
        /* ... */
    }
    /* ... */
}
/* ... */
for (int i = 0; i < 3; i++)
    /* ... */;
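The frame function mostly routes on the container layout: a PCD_OPA signature at the start marks an overview (thumbnail) file, otherwise a full Image Pac must be at least 786432 bytes and carry PCD_IPI at offset 0x800, and the packet size decides how many resolution levels are available. A standalone sketch of that probing plus the sector-style rounding used for streampos (the magic strings, offsets and size thresholds are taken from the fragment above; the function names and labels are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round pos up to the next multiple of 2048, the same arithmetic as the
 * decoder's (pos + 2047) & ~0x7ff alignment of s->streampos. */
static unsigned align2048(unsigned pos)
{
    return (pos + 2047) & ~0x7ffu;
}

/* Classify a buffer the way the decoder's header checks do. */
static const char *classify(const uint8_t *buf, size_t size)
{
    if (size >= 7 && !memcmp("PCD_OPA", buf, 7))
        return "overview/thumbnail file";
    if (size < 786432)
        return "too small for an Image Pac";
    if (memcmp("PCD_IPI", buf + 0x800, 7))
        return "missing PCD_IPI signature";
    return size <= 788480 ? "Image Pac (lower resolutions only)"
                          : "Image Pac with extended resolutions";
}

int main(void)
{
    static uint8_t pac[786432 + 1];   /* zero-filled dummy packet */

    memcpy(pac + 0x800, "PCD_IPI", 7);
    printf("%s\n", classify(pac, sizeof(pac)));
    printf("0x6001 aligned up -> 0x%x\n", align2048(0x6001));  /* 0x6800 */
    return 0;
}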
#define OFFSET(x) offsetof(PhotoCDContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "lowres", "Lower the decoding resolution by a power of two", /* ... */ },
    /* ... */
};